diff --git a/.bzrignore b/.bzrignore index 4af6fdbbe88..951a811f8e4 100644 --- a/.bzrignore +++ b/.bzrignore @@ -549,13 +549,67 @@ mysql-max-4.0.2-alpha-pc-linux-gnu-i686.tar.gz mysql-test/gmon.out mysql-test/install_test_db mysql-test/mysql-test-run +mysql-test/mysql-test-run.log mysql-test/mysql_test_run_new mysql-test/ndb/ndbcluster mysql-test/r/*.reject +mysql-test/r/alter_table.err +mysql-test/r/archive.err +mysql-test/r/bdb-alter-table-1.err +mysql-test/r/bdb-alter-table-2.err +mysql-test/r/bdb-crash.err +mysql-test/r/bdb-deadlock.err +mysql-test/r/bdb.err +mysql-test/r/bdb_cache.err +mysql-test/r/client_test.err +mysql-test/r/csv.err +mysql-test/r/ctype_ucs.err +mysql-test/r/derived.err +mysql-test/r/exampledb.err +mysql-test/r/func_encrypt.err mysql-test/r/index_merge_load.result +mysql-test/r/isam.err +mysql-test/r/lowercase_table2.err +mysql-test/r/multi_update.err +mysql-test/r/mysql_protocols.err +mysql-test/r/mysqlbinlog.err +mysql-test/r/mysqlbinlog2.err +mysql-test/r/mysqldump.err +mysql-test/r/mysqltest.err +mysql-test/r/ndb_alter_table.err +mysql-test/r/ndb_autodiscover.err +mysql-test/r/ndb_autodiscover2.err +mysql-test/r/ndb_basic.err +mysql-test/r/ndb_blob.err +mysql-test/r/ndb_cache.err +mysql-test/r/ndb_charset.err +mysql-test/r/ndb_index.err +mysql-test/r/ndb_index_ordered.err +mysql-test/r/ndb_index_unique.err +mysql-test/r/ndb_insert.err +mysql-test/r/ndb_limit.err +mysql-test/r/ndb_lock.err +mysql-test/r/ndb_minmax.err +mysql-test/r/ndb_replace.err +mysql-test/r/ndb_subquery.err +mysql-test/r/ndb_transaction.err +mysql-test/r/ndb_truncate.err +mysql-test/r/ndb_types.err +mysql-test/r/ndb_update.err +mysql-test/r/openssl_1.err +mysql-test/r/ps_1general.err +mysql-test/r/ps_6bdb.err +mysql-test/r/ps_7ndb.err +mysql-test/r/query_cache.err +mysql-test/r/query_cache_merge.err +mysql-test/r/raid.err +mysql-test/r/repair.err +mysql-test/r/replace.err +mysql-test/r/rpl000001.err mysql-test/r/rpl000001.eval mysql-test/r/rpl000002.eval 
mysql-test/r/rpl000014.eval +mysql-test/r/rpl000015.err mysql-test/r/rpl000015.eval mysql-test/r/rpl000016.eval mysql-test/r/rpl_log.eval @@ -698,6 +752,8 @@ ndb/examples/ndbapi_example2/ndbapi_example2 ndb/examples/ndbapi_example3/ndbapi_example3 ndb/examples/ndbapi_example5/ndbapi_example5 ndb/examples/select_all/select_all +ndb/include/ndb_global.h +ndb/include/ndb_version.h ndb/lib/libMGM_API.so ndb/lib/libNDB_API.so ndb/lib/libNDB_ODBC.so diff --git a/BUILD/compile-dist b/BUILD/compile-dist new file mode 100755 index 00000000000..7c177f54258 --- /dev/null +++ b/BUILD/compile-dist @@ -0,0 +1,48 @@ +#!/bin/sh +# +# This script's purpose is to update the automake/autoconf helper scripts and +# to run a plain "configure" without any special compile flags. Only features +# that affect the content of the source distribution are enabled. The resulting +# tree can then be picked up by "make dist" to create the "pristine source +# package" that is used as the basis for all other binary builds. +# +make distclean +aclocal +autoheader +libtoolize --automake --force --copy +automake --force --add-missing --copy +autoconf +(cd bdb/dist && sh s_all) +(cd innobase && aclocal && autoheader && aclocal && automake && autoconf) + +# Default to gcc for CC and CXX +if test -z "$CXX" ; then + export CXX=gcc +fi + +if test -z "$CC" ; then + export CC=gcc +fi + +# Use ccache, if available +if ccache -V > /dev/null 2>&1 +then + if ! (echo "$CC" | grep "ccache" > /dev/null) + then + export CC="ccache $CC" + fi + if ! 
(echo "$CXX" | grep "ccache" > /dev/null) + then + export CXX="ccache $CXX" + fi +fi + +# Make sure to enable all features that affect "make dist" +./configure \ + --with-embedded-server \ + --with-berkeley-db \ + --with-innodb \ + --enable-thread-safe-client \ + --with-extra-charsets=complex \ + --with-ndbcluster +make diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 2b3339e59d3..0f4bc8d7b37 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -34,6 +34,7 @@ bk@mysql.r18.ru brian@avenger.(none) brian@brian-akers-computer.local brian@private-client-ip-101.oz.net +brian@zim.(none) carsten@tsort.bitbybit.dk cps@silver_beast.(none) davida@isil.mysql.com @@ -96,6 +97,7 @@ jcole@tetra.spaceapes.com jimw@mysql.com joerg@mysql.com jon@gigan. +jonas@mysql.com joreland@bk-internal.mysql.com joreland@mysql.com jorge@linux.jorge.mysql.com @@ -175,6 +177,7 @@ paul@frost.snake.net paul@ice.local paul@ice.snake.net paul@kite-hub.kitebird.com +paul@snake-hub.snake.net paul@teton.kitebird.com pekka@mysql.com pem@mysql.com diff --git a/Build-tools/Bootstrap b/Build-tools/Bootstrap index a7d347ba32f..10211dbb59c 100755 --- a/Build-tools/Bootstrap +++ b/Build-tools/Bootstrap @@ -26,7 +26,7 @@ else } # Some predefined settings -$build_command= "BUILD/compile-pentium-max"; +$build_command= "BUILD/compile-dist"; $PWD= cwd(); $opt_docdir= $PWD . 
"/mysqldoc"; $opt_archive_log= undef; @@ -70,7 +70,7 @@ GetOptions( "test|t", "verbose|v", "win-dist|w", - "quiet|q", + "quiet|q", ) || print_help(""); # @@ -122,18 +122,8 @@ if (($opt_directory ne $PWD) && (!-d $opt_directory && !$opt_dry_run)) # if ($opt_pull) { - &logger("Updating BK tree $REPO to latest ChangeSet first"); - chdir ($REPO) or &abort("Could not chdir to $REPO!"); - &run_command("bk pull", "Could not update $REPO!"); - chdir ($PWD) or &abort("Could not chdir to $PWD!"); - - unless ($opt_skip_manual) - { - &logger("Updating manual tree in $opt_docdir"); - chdir ($opt_docdir) or &abort("Could not chdir to $opt_docdir!"); - &run_command("bk pull", "Could not update $opt_docdir!"); - chdir ($PWD) or &abort("Could not chdir to $PWD!"); - } + &bk_pull("$REPO"); + &bk_pull("$opt_docdir") unless ($opt_skip_manual); } # @@ -262,7 +252,7 @@ if (defined $opt_changelog) $msg= "Adding $target_dir/ChangeLog"; $msg.= " (down to revision $opt_changelog)" if $opt_changelog ne ""; &logger($msg); - $command= "bk changes -mv"; + $command= "bk changes -v"; $command.= " -r" if ($opt_changelog ne "" || $opt_revision); $command.= $opt_changelog if $opt_changelog ne ""; $command.= ".." if ($opt_changelog ne "" && !$opt_revision); @@ -270,7 +260,7 @@ if (defined $opt_changelog) $command.= " " . $REPO . 
" > $target_dir/ChangeLog"; &logger($command); # We cannot use run_command here because of output redirection - if (!$opt_dry_run) + unless ($opt_dry_run) { system($command) == 0 or &abort("Could not create $target_dir/ChangeLog!"); } @@ -281,17 +271,17 @@ if (defined $opt_changelog) # unless ($opt_skip_manual) { - $msg= "Updating manual files"; - &logger($msg); + &logger("Updating manual files"); foreach $file qw/internals manual reservedwords/ { system ("bk cat $opt_docdir/Docs/$file.texi > $target_dir/Docs/$file.texi") == 0 or &abort("Could not update $file.texi in $target_dir/Docs/!"); } - system ("rm -f $target_dir/Docs/Images/Makefile*") == 0 - or &abort("Could not remove Makefiles in $target_dir/Docs/Images/!"); - system ("cp $opt_docdir/Docs/Images/*.* $target_dir/Docs/Images") == 0 - or &abort("Could not copy image files in $target_dir/Docs/Images/!"); + + &run_command("rm -f $target_dir/Docs/Images/Makefile*", + "Could not remove Makefiles in $target_dir/Docs/Images/!"); + &run_command("cp $opt_docdir/Docs/Images/*.* $target_dir/Docs/Images", + "Could not copy image files in $target_dir/Docs/Images/!"); } # @@ -377,6 +367,18 @@ if ($opt_archive_log) exit 0; +# +# Run a BK pull on the given BK tree +# +sub bk_pull +{ + my $bk_tree= $_[0]; + &logger("Updating BK tree $bk_tree to latest ChangeSet first"); + chdir ($bk_tree) or &abort("Could not chdir to $bk_tree!"); + &run_command("bk pull", "Could not update $bk_tree!"); + chdir ($PWD) or &abort("Could not chdir to $PWD!"); +} + # # Print the help text message (with an optional message on top) # diff --git a/Makefile.am b/Makefile.am index 56c52824071..ef7f8937d86 100644 --- a/Makefile.am +++ b/Makefile.am @@ -23,14 +23,14 @@ EXTRA_DIST = INSTALL-SOURCE README COPYING EXCEPTIONS-CLIENT SUBDIRS = . 
include @docs_dirs@ @zlib_dir@ \ @readline_topdir@ sql-common \ @thread_dirs@ pstack \ - @sql_server_dirs@ @sql_client_dirs@ scripts man tests \ + @sql_union_dirs@ scripts man tests \ netware @libmysqld_dirs@ \ @bench_dirs@ support-files @fs_dirs@ @tools_dirs@ DIST_SUBDIRS = . include @docs_dirs@ zlib \ @readline_topdir@ sql-common \ @thread_dirs@ pstack \ - @sql_server_dirs@ @sql_client_dirs@ scripts @man_dirs@ tests SSL\ + @sql_union_dirs@ scripts @man_dirs@ tests SSL\ BUILD netware os2 @libmysqld_dirs@ \ @bench_dirs@ support-files @fs_dirs@ @tools_dirs@ diff --git a/VC++Files/client/mysqldump.dsp b/VC++Files/client/mysqldump.dsp index a1ebdfe11a6..3c955639596 100644 --- a/VC++Files/client/mysqldump.dsp +++ b/VC++Files/client/mysqldump.dsp @@ -51,8 +51,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=xilink6.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 -# ADD LINK32 mysqlclient.lib wsock32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 /out:"../client_release/mysqldump.exe" /libpath:"..\lib_release\\" +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib mysys.lib /nologo /subsystem:console /machine:I386 +# ADD LINK32 mysqlclient.lib wsock32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib mysys.lib /nologo /subsystem:console /machine:I386 /out:"../client_release/mysqldump.exe" /libpath:"..\lib_release\\" !ELSEIF "$(CFG)" == "mysqldump - Win32 Debug" @@ -76,8 +76,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo 
LINK32=xilink6.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept -# ADD LINK32 mysqlclient.lib wsock32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /incremental:no /debug /machine:I386 /out:"../client_debug/mysqldump.exe" /pdbtype:sept /libpath:"..\lib_debug\\" +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib mysys.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept +# ADD LINK32 mysqlclient.lib wsock32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib mysys.lib /nologo /subsystem:console /incremental:no /debug /machine:I386 /out:"../client_debug/mysqldump.exe" /pdbtype:sept /libpath:"..\lib_debug\\" !ELSEIF "$(CFG)" == "mysqldump - Win32 classic" @@ -103,8 +103,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=xilink6.exe -# ADD BASE LINK32 mysqlclient.lib wsock32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 /out:"../client_release/mysqldump.exe" /libpath:"..\lib_release\\" -# ADD LINK32 mysqlclient.lib wsock32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386 /out:"../client_classic/mysqldump.exe" /libpath:"..\lib_release\\" +# ADD BASE LINK32 mysqlclient.lib wsock32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib 
ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib mysys.lib /nologo /subsystem:console /machine:I386 /out:"../client_release/mysqldump.exe" /libpath:"..\lib_release\\" +# ADD LINK32 mysqlclient.lib wsock32.lib kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib mysys.lib /nologo /subsystem:console /machine:I386 /out:"../client_classic/mysqldump.exe" /libpath:"..\lib_release\\" !ENDIF diff --git a/VC++Files/client/mysqltest.dsp b/VC++Files/client/mysqltest.dsp index 1f1613026a9..d04dc5bfce8 100644 --- a/VC++Files/client/mysqltest.dsp +++ b/VC++Files/client/mysqltest.dsp @@ -67,8 +67,8 @@ LINK32=link.exe # PROP Output_Dir ".\classic" # PROP Intermediate_Dir ".\classic" # PROP Target_Dir "" -# ADD BASE CPP /nologo /MT /I "../include" /I "../" /W3 /Ob1 /G6 /D "_CONSOLE" /D "_WINDOWS" /D "LICENSE=Commercial" /D "DBUG_OFF" /D "NDEBUG" /D "_MBCS" /GF /Gy /Fp".\classic/mysqltest.pch" /Fo".\classic/" /Fd".\classic/" /c /GX -# ADD CPP /nologo /MT /I "../include" /I "../" /W3 /Ob1 /G6 /D "_CONSOLE" /D "_WINDOWS" /D "LICENSE=Commercial" /D "DBUG_OFF" /D "NDEBUG" /D "_MBCS" /GF /Gy /Fp".\classic/mysqltest.pch" /Fo".\classic/" /Fd".\classic/" /c /GX +# ADD BASE CPP /nologo /MT /I "../include" /I "../regex" /I "../" /W3 /Ob1 /G6 /D "_CONSOLE" /D "_WINDOWS" /D "LICENSE=Commercial" /D "DBUG_OFF" /D "NDEBUG" /D "_MBCS" /GF /Gy /Fp".\classic/mysqltest.pch" /Fo".\classic/" /Fd".\classic/" /c /GX +# ADD CPP /nologo /MT /I "../include" /I "../regex" /I "../" /W3 /Ob1 /G6 /D "_CONSOLE" /D "_WINDOWS" /D "LICENSE=Commercial" /D "DBUG_OFF" /D "NDEBUG" /D "_MBCS" /GF /Gy /Fp".\classic/mysqltest.pch" /Fo".\classic/" /Fd".\classic/" /c /GX # ADD BASE MTL /nologo /tlb".\classic\mysqltest.tlb" /win32 # ADD MTL /nologo /tlb".\classic\mysqltest.tlb" /win32 # ADD BASE RSC /l 1033 /d "NDEBUG" @@ -77,8 +77,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 
kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib odbc32.lib odbccp32.lib mysqlclient.lib wsock32.lib /nologo /out:"..\client_classic\mysqltest.exe" /incremental:no /libpath:"..\lib_release\" /pdb:".\classic\mysqltest.pdb" /pdbtype:sept /subsystem:console /MACHINE:I386 -# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib odbc32.lib odbccp32.lib mysqlclient.lib wsock32.lib /nologo /out:"..\client_classic\mysqltest.exe" /incremental:no /libpath:"..\lib_release\" /pdb:".\classic\mysqltest.pdb" /pdbtype:sept /subsystem:console /MACHINE:I386 +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib odbc32.lib odbccp32.lib mysqlclient.lib wsock32.lib mysys.lib regex.lib /nologo /out:"..\client_classic\mysqltest.exe" /incremental:no /libpath:"..\lib_release\" /pdb:".\classic\mysqltest.pdb" /pdbtype:sept /subsystem:console /MACHINE:I386 +# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib odbc32.lib odbccp32.lib mysqlclient.lib wsock32.lib mysys.lib regex.lib /nologo /out:"..\client_classic\mysqltest.exe" /incremental:no /libpath:"..\lib_release\" /pdb:".\classic\mysqltest.pdb" /pdbtype:sept /subsystem:console /MACHINE:I386 !ELSEIF "$(CFG)" == "mysqltest - Win32 Release" @@ -92,8 +92,8 @@ LINK32=link.exe # PROP Output_Dir ".\release" # PROP Intermediate_Dir ".\release" # PROP Target_Dir "" -# ADD BASE CPP /nologo /MT /I "../include" /I "../" /W3 /Ob1 /G6 /D "DBUG_OFF" /D "_CONSOLE" /D "_WINDOWS" /D "NDEBUG" /D "_MBCS" /GF /Gy /Fp".\release/mysqltest.pch" /Fo".\release/" /Fd".\release/" /c /GX -# ADD CPP /nologo /MT /I "../include" /I "../" /W3 /Ob1 /G6 /D "DBUG_OFF" /D "_CONSOLE" /D 
"_WINDOWS" /D "NDEBUG" /D "_MBCS" /GF /Gy /Fp".\release/mysqltest.pch" /Fo".\release/" /Fd".\release/" /c /GX +# ADD BASE CPP /nologo /MT /I "../include" /I "../regex" /I "../" /W3 /Ob1 /G6 /D "DBUG_OFF" /D "_CONSOLE" /D "_WINDOWS" /D "NDEBUG" /D "_MBCS" /GF /Gy /Fp".\release/mysqltest.pch" /Fo".\release/" /Fd".\release/" /c /GX +# ADD CPP /nologo /MT /I "../include" /I "../regex" /I "../" /W3 /Ob1 /G6 /D "DBUG_OFF" /D "_CONSOLE" /D "_WINDOWS" /D "NDEBUG" /D "_MBCS" /GF /Gy /Fp".\release/mysqltest.pch" /Fo".\release/" /Fd".\release/" /c /GX # ADD BASE MTL /nologo /tlb".\release\mysqltest.tlb" /win32 # ADD MTL /nologo /tlb".\release\mysqltest.tlb" /win32 # ADD BASE RSC /l 1033 /d "NDEBUG" @@ -102,8 +102,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib odbc32.lib odbccp32.lib mysqlclient.lib wsock32.lib /nologo /out:"..\client_release\mysqltest.exe" /incremental:no /libpath:"..\lib_release\" /pdb:".\release\mysqltest.pdb" /pdbtype:sept /subsystem:console /MACHINE:I386 -# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib odbc32.lib odbccp32.lib mysqlclient.lib wsock32.lib /nologo /out:"..\client_release\mysqltest.exe" /incremental:no /libpath:"..\lib_release\" /pdb:".\release\mysqltest.pdb" /pdbtype:sept /subsystem:console /MACHINE:I386 +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib odbc32.lib odbccp32.lib mysqlclient.lib wsock32.lib mysys.lib regex.lib /nologo /out:"..\client_release\mysqltest.exe" /incremental:no /libpath:"..\lib_release\" /pdb:".\release\mysqltest.pdb" /pdbtype:sept /subsystem:console /MACHINE:I386 +# ADD LINK32 kernel32.lib user32.lib gdi32.lib 
winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib odbc32.lib odbccp32.lib mysqlclient.lib wsock32.lib mysys.lib regex.lib /nologo /out:"..\client_release\mysqltest.exe" /incremental:no /libpath:"..\lib_release\" /pdb:".\release\mysqltest.pdb" /pdbtype:sept /subsystem:console /MACHINE:I386 !ENDIF diff --git a/VC++Files/mysql-test/mysql_test_run_new.dsp b/VC++Files/mysql-test/mysql_test_run_new.dsp index 7e43da20b26..61392b00b94 100644 --- a/VC++Files/mysql-test/mysql_test_run_new.dsp +++ b/VC++Files/mysql-test/mysql_test_run_new.dsp @@ -76,8 +76,8 @@ BSC32=bscmake.exe # ADD BASE BSC32 /nologo # ADD BSC32 /nologo LINK32=link.exe -# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib odbc32.lib odbccp32.lib Ws2_32.lib /nologo /out:"..\mysql-test\mysql_test_run_new.exe" /incremental:no /pdb:".\Release\mysql_test_run_new.pdb" /pdbtype:sept /subsystem:windows -# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib odbc32.lib odbccp32.lib Ws2_32.lib /nologo /out:"..\mysql-test\mysql_test_run_new.exe" /incremental:no /pdb:".\Release\mysql_test_run_new.pdb" /pdbtype:sept /subsystem:windows +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib odbc32.lib odbccp32.lib Ws2_32.lib /nologo /out:"..\mysql-test\mysql_test_run_new.exe" /incremental:no /pdb:".\Release\mysql_test_run_new.pdb" /pdbtype:sept /subsystem:console +# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib odbc32.lib odbccp32.lib Ws2_32.lib /nologo /out:"..\mysql-test\mysql_test_run_new.exe" /incremental:no /pdb:".\Release\mysql_test_run_new.pdb" 
/pdbtype:sept /subsystem:console !ENDIF diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc index 21e8f6ab3e4..e62ec8d7cea 100644 --- a/client/mysqladmin.cc +++ b/client/mysqladmin.cc @@ -33,7 +33,8 @@ #define SHUTDOWN_DEF_TIMEOUT 3600 /* Wait for shutdown */ #define MAX_TRUNC_LENGTH 3 -char *host= NULL, *user= 0, *opt_password= 0; +char *host= NULL, *user= 0, *opt_password= 0, + *default_charset= NULL; char truncated_var_names[MAX_MYSQL_VAR][MAX_TRUNC_LENGTH]; char ex_var_names[MAX_MYSQL_VAR][FN_REFLEN]; ulonglong last_values[MAX_MYSQL_VAR]; @@ -145,6 +146,9 @@ static struct my_option my_long_options[] = {"character-sets-dir", OPT_CHARSETS_DIR, "Directory where character sets are.", (gptr*) &charsets_dir, (gptr*) &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"default-character-set", OPT_DEFAULT_CHARSET, + "Set the default character set.", (gptr*) &default_charset, + (gptr*) &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"help", '?', "Display this help and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"host", 'h', "Connect to host.", (gptr*) &host, (gptr*) &host, 0, GET_STR, @@ -343,6 +347,8 @@ int main(int argc,char *argv[]) if (shared_memory_base_name) mysql_options(&mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name); #endif + if (default_charset) + mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, default_charset); if (sql_connect(&mysql, option_wait)) { unsigned int err= mysql_errno(&mysql); @@ -827,13 +833,39 @@ static int execute_commands(MYSQL *mysql,int argc, char **argv) if (argv[1][0]) { char *pw= argv[1]; + bool old= find_type(argv[0], &command_typelib, 2) == ADMIN_OLD_PASSWORD; #ifdef __WIN__ uint pw_len= strlen(pw); if (pw_len > 1 && pw[0] == '\'' && pw[pw_len-1] == '\'') printf("Warning: single quotes were not trimmed from the password by" " your command\nline client, as you might have expected.\n"); #endif - if (find_type(argv[0], &command_typelib, 2) == ADMIN_OLD_PASSWORD) + /* + If we don't 
already know to use an old-style password, see what + the server is using + */ + if (!old) { + if (mysql_query(mysql, "SHOW VARIABLES LIKE 'old_passwords'")) { + my_printf_error(0, "Could not determine old_passwords setting from server; error: '%s'", + MYF(ME_BELL),mysql_error(mysql)); + return -1; + } else { + MYSQL_RES *res= mysql_store_result(mysql); + if (!res) { + my_printf_error(0, "Could not get old_passwords setting from server; error: '%s'", + MYF(ME_BELL),mysql_error(mysql)); + return -1; + } + if (!mysql_num_rows(res)) { + old= 1; + } else { + MYSQL_ROW row= mysql_fetch_row(res); + old= !strncmp(row[1], "ON", 2); + } + mysql_free_result(res); + } + } + if (old) make_scrambled_password_323(crypted_pw, pw); else make_scrambled_password(crypted_pw, pw); diff --git a/client/mysqldump.c b/client/mysqldump.c index 76330a98b9c..876e4ef55e1 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -106,7 +106,14 @@ FILE *md_result_file; static char *shared_memory_base_name=0; #endif static uint opt_protocol= 0; -static char *default_charset= (char*) MYSQL_UNIVERSAL_CLIENT_CHARSET; +/* + Constant for detection of default value of default_charset. + If default_charset is equal to mysql_universal_client_charset, then + it is the default value which assigned at the very beginning of main(). +*/ +static const char *mysql_universal_client_charset= + MYSQL_UNIVERSAL_CLIENT_CHARSET; +static char *default_charset; static CHARSET_INFO *charset_info= &my_charset_latin1; const char *default_dbug_option="d:t:o,/tmp/mysqldump.trace"; /* do we met VIEWs during tables scaning */ @@ -682,7 +689,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), Set charset to the default compiled value if it hasn't been reset yet by --default-character-set=xxx. 
*/ - if (default_charset == (char*) MYSQL_UNIVERSAL_CLIENT_CHARSET) + if (default_charset == mysql_universal_client_charset) default_charset= (char*) MYSQL_DEFAULT_CHARSET_NAME; break; } @@ -2610,6 +2617,7 @@ static my_bool getViewStructure(char *table, char* db) int main(int argc, char **argv) { compatible_mode_normal_str[0]= 0; + default_charset= (char *)mysql_universal_client_charset; MY_INIT(argv[0]); if (get_options(&argc, &argv)) diff --git a/config/ac-macros/ha_ndbcluster.m4 b/config/ac-macros/ha_ndbcluster.m4 index aea6f37ae2a..433bba95e91 100644 --- a/config/ac-macros/ha_ndbcluster.m4 +++ b/config/ac-macros/ha_ndbcluster.m4 @@ -36,11 +36,6 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [ ;; esac - AC_ARG_WITH([ndb-shm], - [ - --with-ndb-shm Include the NDB Cluster shared memory transporter], - [ndb_shm="$withval"], - [ndb_shm=no]) AC_ARG_WITH([ndb-test], [ --with-ndb-test Include the NDB Cluster ndbapi test programs], @@ -61,23 +56,15 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [ --with-ndb-port-base Base port for NDB Cluster transporters], [ndb_port_base="$withval"], [ndb_port_base="default"]) + AC_ARG_WITH([ndb-debug], + [ + --without-ndb-debug Disable special ndb debug features], + [ndb_debug="$withval"], + [ndb_debug="default"]) AC_MSG_CHECKING([for NDB Cluster options]) AC_MSG_RESULT([]) - have_ndb_shm=no - case "$ndb_shm" in - yes ) - AC_MSG_RESULT([-- including shared memory transporter]) - AC_DEFINE([NDB_SHM_TRANSPORTER], [1], - [Including Ndb Cluster DB shared memory transporter]) - have_ndb_shm="yes" - ;; - * ) - AC_MSG_RESULT([-- not including shared memory transporter]) - ;; - esac - have_ndb_test=no case "$ndb_test" in yes ) @@ -100,6 +87,24 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [ ;; esac + case "$ndb_debug" in + yes ) + AC_MSG_RESULT([-- including ndb extra debug options]) + have_ndb_debug="yes" + ;; + full ) + AC_MSG_RESULT([-- including ndb extra extra debug options]) + have_ndb_debug="full" + ;; + no ) + AC_MSG_RESULT([-- not including ndb extra debug 
options]) + have_ndb_debug="no" + ;; + * ) + have_ndb_debug="default" + ;; + esac + AC_MSG_RESULT([done.]) ]) diff --git a/configure.in b/configure.in index 47ac06706b2..52906514c89 100644 --- a/configure.in +++ b/configure.in @@ -2450,7 +2450,7 @@ thread_dirs= dnl This probably should be cleaned up more - for now the threaded dnl client is just using plain-old libs. -sql_client_dirs="libmysql client" +sql_client_dirs="libmysql strings regex client" linked_client_targets="linked_libmysql_sources" CLIENT_LIBS=$NON_THREADED_CLIENT_LIBS if test "$THREAD_SAFE_CLIENT" != "no" @@ -2624,6 +2624,20 @@ AC_SUBST(sql_server_dirs) AC_SUBST(thread_dirs) AC_SUBST(server_scripts) +# Now that sql_client_dirs and sql_server_dirs are stable, determine the union. +# Start with the (longer) server list, add each client item not yet present. +sql_union_dirs=" $sql_server_dirs " +for DIR in $sql_client_dirs +do + if echo $sql_union_dirs | grep " $DIR " >/dev/null + then + : # already present, skip + else + sql_union_dirs="$sql_union_dirs $DIR " + fi +done +AC_SUBST(sql_union_dirs) + #if test "$with_posix_threads" = "no" -o "$with_mit_threads" = "yes" #then # MIT pthreads does now support connecting with unix sockets diff --git a/include/config-win.h b/include/config-win.h index 1d54a4bf9ec..cb2c072d056 100644 --- a/include/config-win.h +++ b/include/config-win.h @@ -332,6 +332,9 @@ inline double ulonglong2double(ulonglong value) #define SHAREDIR "share" #define DEFAULT_CHARSET_HOME "C:/mysql/" #endif +#ifndef DEFAULT_HOME_ENV +#define DEFAULT_HOME_ENV MYSQL_HOME +#endif /* File name handling */ diff --git a/include/my_pthread.h b/include/my_pthread.h index b483b68d5cb..57240788e2f 100644 --- a/include/my_pthread.h +++ b/include/my_pthread.h @@ -631,6 +631,7 @@ extern int pthread_dummy(int); /* All thread specific variables are in the following struct */ #define THREAD_NAME_SIZE 10 +#ifndef DEFAULT_THREAD_STACK #if defined(__ia64__) /* MySQL can survive with 32K, but some glibc libraries 
require > 128K stack @@ -640,6 +641,7 @@ extern int pthread_dummy(int); #else #define DEFAULT_THREAD_STACK (192*1024) #endif +#endif struct st_my_thread_var { diff --git a/include/myisam.h b/include/myisam.h index fd75af2d997..e0eb8715aef 100644 --- a/include/myisam.h +++ b/include/myisam.h @@ -340,8 +340,8 @@ typedef struct st_mi_check_param ha_checksum key_crc[MI_MAX_POSSIBLE_KEY]; ulong rec_per_key_part[MI_MAX_KEY_SEG*MI_MAX_POSSIBLE_KEY]; void *thd; - char *db_name,*table_name; - char *op_name; + const char *db_name, *table_name; + const char *op_name; } MI_CHECK; typedef struct st_sort_ft_buf diff --git a/innobase/include/ut0ut.h b/innobase/include/ut0ut.h index dee8785c9e7..8938957cd12 100644 --- a/innobase/include/ut0ut.h +++ b/innobase/include/ut0ut.h @@ -139,6 +139,14 @@ ib_time_t ut_time(void); /*=========*/ /************************************************************** +Returns system time. */ + +void +ut_usectime( +/*========*/ + ulint* sec, /* out: seconds since the Epoch */ + ulint* ms); /* out: microseconds since the Epoch+*sec */ +/************************************************************** Returns the difference of two times in seconds. 
*/ double diff --git a/innobase/log/log0recv.c b/innobase/log/log0recv.c index 5eefd32c8a6..35dc9a06020 100644 --- a/innobase/log/log0recv.c +++ b/innobase/log/log0recv.c @@ -3040,8 +3040,7 @@ recv_reset_log_files_for_backup( memcpy(name + log_dir_len, logfilename, sizeof logfilename); buf = ut_malloc(LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE); - memset(buf, LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE, '\0'); - + memset(buf, '\0', LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE); for (i = 0; i < n_log_files; i++) { diff --git a/innobase/mtr/mtr0log.c b/innobase/mtr/mtr0log.c index 417093134c3..4f826f242e8 100644 --- a/innobase/mtr/mtr0log.c +++ b/innobase/mtr/mtr0log.c @@ -443,7 +443,8 @@ mlog_open_and_write_index( type = dict_col_get_type(dict_field_get_col(field)); len = field->fixed_len; ut_ad(len < 0x7fff); - if (len == 0 && dtype_get_len(type) > 255) { + if (len == 0 && (dtype_get_len(type) > 255 + || dtype_get_mtype(type) == DATA_BLOB)) { /* variable-length field with maximum length > 255 */ len = 0x7fff; diff --git a/innobase/os/os0file.c b/innobase/os/os0file.c index e0d822b016f..ed3c0e53c12 100644 --- a/innobase/os/os0file.c +++ b/innobase/os/os0file.c @@ -1767,7 +1767,21 @@ os_file_flush( #else int ret; -#ifdef HAVE_FDATASYNC +#ifdef HAVE_DARWIN_THREADS + /* Apple has disabled fsync() for internal disk drives in OS X. That + caused corruption for a user when he tested a power outage. Let us in + OS X use a nonstandard flush method recommended by an Apple + engineer. */ + + ret = fcntl(file, F_FULLFSYNC, NULL); + + if (ret) { + /* If we are not on a file system that supports this, then + fall back to a plain fsync. 
*/ + + ret = fsync(file); + } +#elif HAVE_FDATASYNC ret = fdatasync(file); #else /* fprintf(stderr, "Flushing to file %p\n", file); */ diff --git a/innobase/srv/srv0srv.c b/innobase/srv/srv0srv.c index 61bddc9da2c..15dacdf6333 100644 --- a/innobase/srv/srv0srv.c +++ b/innobase/srv/srv0srv.c @@ -346,10 +346,10 @@ static ulint srv_n_rows_updated_old = 0; static ulint srv_n_rows_deleted_old = 0; static ulint srv_n_rows_read_old = 0; -ulint srv_n_lock_wait_count= 0; -ulint srv_n_lock_wait_current_count= 0; -ib_longlong srv_n_lock_wait_time= 0; -ulint srv_n_lock_max_wait_time= 0; +ulint srv_n_lock_wait_count = 0; +ulint srv_n_lock_wait_current_count = 0; +ib_longlong srv_n_lock_wait_time = 0; +ulint srv_n_lock_max_wait_time = 0; /* @@ -1384,10 +1384,11 @@ srv_suspend_mysql_thread( trx_t* trx; ibool had_dict_lock = FALSE; ibool was_declared_inside_innodb = FALSE; - ib_longlong start_time, finish_time; - ulint diff_time; - ulint sec; - ulint ms; + ib_longlong start_time = 0; + ib_longlong finish_time; + ulint diff_time; + ulint sec; + ulint ms; #ifdef UNIV_SYNC_DEBUG ut_ad(!mutex_own(&kernel_mutex)); @@ -1430,15 +1431,14 @@ srv_suspend_mysql_thread( os_event_reset(event); slot->suspend_time = ut_time(); - if (thr->lock_state == QUE_THR_LOCK_ROW) - { - srv_n_lock_wait_count++; - srv_n_lock_wait_current_count++; - ut_usectime(&sec, &ms); - start_time= (ib_longlong)sec * 1000000 + ms; + if (thr->lock_state == QUE_THR_LOCK_ROW) { + srv_n_lock_wait_count++; + srv_n_lock_wait_current_count++; - } + ut_usectime(&sec, &ms); + start_time = (ib_longlong)sec * 1000000 + ms; + } /* Wake the lock timeout monitor thread, if it is suspended */ os_event_set(srv_lock_timeout_thread_event); @@ -1490,20 +1490,18 @@ srv_suspend_mysql_thread( wait_time = ut_difftime(ut_time(), slot->suspend_time); - if (thr->lock_state == QUE_THR_LOCK_ROW) - { - ut_usectime(&sec, &ms); - finish_time= (ib_longlong)sec * 1000000 + ms; + if (thr->lock_state == QUE_THR_LOCK_ROW) { + ut_usectime(&sec, &ms); + 
finish_time = (ib_longlong)sec * 1000000 + ms; + + diff_time = finish_time - start_time; - diff_time= finish_time-start_time; - - srv_n_lock_wait_current_count--; - srv_n_lock_wait_time= srv_n_lock_wait_time + diff_time; - if (diff_time > srv_n_lock_max_wait_time) - { - srv_n_lock_max_wait_time= diff_time; - } - } + srv_n_lock_wait_current_count--; + srv_n_lock_wait_time = srv_n_lock_wait_time + diff_time; + if (diff_time > srv_n_lock_max_wait_time) { + srv_n_lock_max_wait_time = diff_time; + } + } if (trx->was_chosen_as_deadlock_victim) { diff --git a/innobase/sync/sync0sync.c b/innobase/sync/sync0sync.c index 317455f57a3..788965f82ef 100644 --- a/innobase/sync/sync0sync.c +++ b/innobase/sync/sync0sync.c @@ -368,7 +368,7 @@ mutex_spin_wait( { ulint index; /* index of the reserved wait cell */ ulint i; /* spin round count */ - ib_longlong lstart_time, lfinish_time; /* for timing os_wait */ + ib_longlong lstart_time = 0, lfinish_time; /* for timing os_wait */ ulint ltime_diff; ulint sec; ulint ms; diff --git a/innobase/ut/ut0ut.c b/innobase/ut/ut0ut.c index 21c2833b6d6..f35b4dea5e0 100644 --- a/innobase/ut/ut0ut.c +++ b/innobase/ut/ut0ut.c @@ -74,18 +74,18 @@ ut_time(void) } /************************************************************** -Returns system time. We do not specify the format of the time returned: -the only way to manipulate it is to use the function ut_difftime. */ +Returns system time. 
*/ void -ut_usectime(ulint* sec, ulint* ms) -/*=========*/ +ut_usectime( +/*========*/ + ulint* sec, /* out: seconds since the Epoch */ + ulint* ms) /* out: microseconds since the Epoch+*sec */ { - struct timeval tv; - gettimeofday(&tv,NULL); - *sec = (ulint) tv.tv_sec; - *ms = (ulint) tv.tv_usec; - return; + struct timeval tv; + gettimeofday(&tv,NULL); + *sec = (ulint) tv.tv_sec; + *ms = (ulint) tv.tv_usec; } /************************************************************** diff --git a/libmysql/Makefile.shared b/libmysql/Makefile.shared index 9664fb0abef..c7ce0273707 100644 --- a/libmysql/Makefile.shared +++ b/libmysql/Makefile.shared @@ -81,6 +81,7 @@ CLEANFILES = $(target_libadd) $(SHLIBOBJS) \ $(target) DEFS = -DDEFAULT_CHARSET_HOME="\"$(MYSQLBASEdir)\"" \ -DDATADIR="\"$(MYSQLDATAdir)\"" \ + -DDEFAULT_HOME_ENV=MYSQL_HOME \ -DSHAREDIR="\"$(MYSQLSHAREdir)\"" $(target_defs) # The automatic dependencies miss this diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c index b791090346b..29c6f469098 100644 --- a/libmysql/libmysql.c +++ b/libmysql/libmysql.c @@ -1865,12 +1865,14 @@ my_bool cli_read_prepare_result(MYSQL *mysql, MYSQL_STMT *stmt) { uchar *pos; uint field_count, param_count; + ulong packet_length; MYSQL_DATA *fields_data; - DBUG_ENTER("read_prepare_result"); + DBUG_ENTER("cli_read_prepare_result"); mysql= mysql->last_used_con; - if (net_safe_read(mysql) == packet_error) + if ((packet_length= net_safe_read(mysql)) == packet_error) DBUG_RETURN(1); + mysql->warning_count= 0; pos= (uchar*) mysql->net.read_pos; stmt->stmt_id= uint4korr(pos+1); pos+= 5; @@ -1878,6 +1880,8 @@ my_bool cli_read_prepare_result(MYSQL *mysql, MYSQL_STMT *stmt) field_count= uint2korr(pos); pos+= 2; /* Number of placeholders in the statement */ param_count= uint2korr(pos); pos+= 2; + if (packet_length >= 12) + mysql->warning_count= uint2korr(pos+1); if (param_count != 0) { @@ -1894,7 +1898,6 @@ my_bool cli_read_prepare_result(MYSQL *mysql, MYSQL_STMT *stmt) if (!(mysql->server_status & 
SERVER_STATUS_AUTOCOMMIT)) mysql->server_status|= SERVER_STATUS_IN_TRANS; - mysql->extra_info= net_field_length_ll(&pos); if (!(fields_data= (*mysql->methods->read_rows)(mysql,(MYSQL_FIELD*)0,7))) DBUG_RETURN(1); if (!(stmt->fields= unpack_fields(fields_data,&stmt->mem_root, @@ -1902,9 +1905,10 @@ my_bool cli_read_prepare_result(MYSQL *mysql, MYSQL_STMT *stmt) mysql->server_capabilities))) DBUG_RETURN(1); } - stmt->field_count= (uint) field_count; + stmt->field_count= field_count; stmt->param_count= (ulong) param_count; - mysql->warning_count= 0; + DBUG_PRINT("exit",("field_count: %u param_count: %u warning_count: %u", + field_count, param_count, (uint) mysql->warning_count)); DBUG_RETURN(0); } diff --git a/man/Makefile.am b/man/Makefile.am index 539c43dfed6..8d18165191a 100644 --- a/man/Makefile.am +++ b/man/Makefile.am @@ -17,11 +17,11 @@ ## Process this file with automake to create Makefile.in -man_MANS = mysql.1 isamchk.1 isamlog.1 mysql_zap.1 mysqlaccess.1 \ +man_MANS = mysql.1 mysql_zap.1 mysqlaccess.1 \ mysqladmin.1 mysqld.1 mysqld_multi.1 mysqldump.1 mysqlshow.1 \ perror.1 replace.1 mysqld_safe.1 mysql_fix_privilege_tables.1 -EXTRA_DIST = mysql.1.in isamchk.1.in isamlog.1.in mysql_zap.1.in \ +EXTRA_DIST = mysql.1.in mysql_zap.1.in \ mysqlaccess.1.in mysqladmin.1.in mysqld.1.in mysqld_multi.1.in \ mysqldump.1.in mysqlshow.1.in perror.1.in replace.1.in mysqlman.1.in \ mysqld_safe.1.in mysql_fix_privilege_tables.1.in diff --git a/man/isamchk.1.in b/man/isamchk.1.in deleted file mode 100644 index cad1303ee55..00000000000 --- a/man/isamchk.1.in +++ /dev/null @@ -1,145 +0,0 @@ -.TH isamchk 1 "19 December 2000" "MySQL @MYSQL_BASE_VERSION@" "MySQL database" -.SH NAME -.BR isamchk - \- Description, check and repair of ISAM tables. -Used without options all tables on the command will be checked for errors -.SH USAGE -isamchk [OPTIONS] tables[.ISM] -.SH SYNOPSIS -.B isamchk -.RB [ \-a | \-\-analyze ] -.RB [ \-# | \-\-debug=... ] -.RB [ \-\-character\-sets\-dir=...] 
-.RB [ \-C | \-\-default\-character\-set=...] -.RB [ \-d | \-\-description ] -.RB [ \-e | \-\-extend\-check ] -.RB [ \-f | \-\-force ] -.RB [ \-? | \-\-help ] -.RB [ \-i | \-\-information ] -.RB [ \-k | \-\-keys\-used=# ] -.RB [ \-l | \-\-no\-symlinks] -.RB [ \-q | \-\-quick ] -.RB [ \-r | \-\-recover ] -.RB [ \-o | \-\-safe\-recover ] -.RB [ \-O | "\-\-set\-variable var=option"] -.RB [ \-s | \-\-silent ] -.RB [ \-S | \-\-sort\-index ] -.RB [ \-R | \-\-sort\-records=#] -.RB [ \-u | \-\-unpack ] -.RB [ \-v | \-\-verbose ] -.RB [ \-V | \-\-version ] -.RB [ \-w | \-\-wait ] -.SH DESCRIPTION -.TP -.BR \-a | \-\-analyze -Analyze distribution of keys. Will make some joins in -MySQL faster. -.TP -.BR \-# | \-\-debug=... -Output debug log. Often this is 'd:t:o ,filename` -.TP -.BR \-\-character\-sets\-dir=... -Directory where character sets are -.TP -.BR \-C | \-\-default\-character\-set=... -Set the default character set -.TP -.BR \-d | \-\-description -Prints some information about table. -.TP -.BR \-e | \-\-extend\-check -Check the table VERY thoroughly. One need use this -only in extreme cases as isamchk should normally find -all errors even without this switch -.TP -.BR \-f | \-\-force -Overwrite old temporary files. -If one uses \-f when checking tables (running isamchk -without \-r), isamchk will automatically restart with -\-r on any wrong table. -.TP -.BR \-? | \-\-help -Display help and exit. -.TP -.BR \-i | \-\-information -Print statistics information about the table -.TP -.BR \-k | \-\-keys\-used=# -Used with '\-r'. Tell ISAM to update only the first -# keys. This can be used to get faster inserts! -.TP -.BR \-l | \-\-no\-symlinks -Do not follow symbolic links when repairing. Normally -isamchk repairs the table a symlink points at. -.TP -.BR \-q | \-\-quick -Used with \-r to get a faster repair. (The data file -isn't touched.) One can give a second '\-q' to force -isamchk to modify the original datafile. 
-.TP -.BR \-r | \-\-recover -Can fix almost anything except unique keys that aren't -unique. -.TP -.BR \-o | \-\-safe\-recover -Uses old recovery method; slower than '\-r' but can -handle a couple of cases that '\-r' cannot handle. -.TP -.BR \-O | " \-\-set\-variable var=option " -Change the value of a variable. -.TP -.BR \-s | \-\-silent -Only print errors. One can use two \-s to make isamchk -very silent -.TP -.BR \-S | \-\-sort\-index -Sort index blocks. This speeds up 'read\-next' in -applications -.TP -.BR \-R | \-\-sort\-records=# -Sort records according to an index. This makes your -data much more localized and may speed up things -(It may be VERY slow to do a sort the first time!) -.TP -.BR \-u | \-\-unpack -Unpack file packed with pack_isam. -.TP -.BR \-v | \-\-verbose -Print more information. This can be used with -\-d and \-e. Use many \-v for more verbosity! -.TP -.BR \-V | \-\-version -Print version and exit. -.TP -.BR \-w | \-\-wait -Wait if table is locked. -.SH "SEE ALSO" -isamlog(1), -mysql(1), -mysqlaccess(1), -mysqladmin(1), -mysqld(1), -mysqld_multi(1), -mysqld_safe(1), -mysqldump(1), -mysql_fix_privilege_tables(1), -mysqlshow(1), -mysql_zap(1), -perror(1), -replace(1) -.P -For more information please refer to the MySQL reference -manual, which may already be installed locally and which -is also available online at http://www.mysql.com/doc/en/ -.SH BUGS -Please refer to http://bugs.mysql.com/ to report bugs. -.SH AUTHOR -Ver 1.0, distribution @MYSQL_NO_DASH_VERSION@ -Michael (Monty) Widenius (monty@mysql.com), -MySQL AB (http://www.mysql.com/). -This software comes with no warranty. -Manual page by L. 
(Kill-9) Pedersen -(kill-9@kill\-9.dk), Mercurmedia Data Model Architect / -system developer (http://www.mercurmedia.com) - -.\" end of man page diff --git a/man/isamlog.1.in b/man/isamlog.1.in deleted file mode 100644 index 6040f6c4ad8..00000000000 --- a/man/isamlog.1.in +++ /dev/null @@ -1,107 +0,0 @@ -.TH isamlog 1 "19 December 2000" "MySQL @MYSQL_BASE_VERSION@" "MySQL database" -.SH NAME -isamlog - Write info about whats in a nisam log file. -.SH USAGE -isamlog [-?iruvIV] [-c #] [-f #] [-F filepath/] [-o #] [-R file recordpos] [-w write_file] [log-filename [table ...]] -.SH SYNOPSIS -.B isamlog -.RB [ -? | -I ] -.RB [ -V ] -.RB [ -c ] -.RB [ -f ] -.RB [ -F ] -.RB [ -i ] -.RB [ -o ] -.RB [ "-p #" ] -.RB [ -r ] -.RB [ -R ] -.RB [ -u ] -.RB [ -v ] -.RB [ -w ] -.SH DESCRIPTION -.TP -.BR isamlog -.TP -.BR -? | -I -info -.TP -.BR -V -version -.TP -.BR -c -do only # commands -.TP -.BR -f -max open files -.TP -.BR -F -file path -.TP -.BR -i -extra info -.TP -.BR -o -offset -.TP -.BR "-p #" -remove # components from path -.TP -.BR -r -recover -.TP -.BR -R -file recordposition -.TP -.BR -u -update -.TP -.BR -v -verbose -.TP -.BR -w -write file -.SH NOTE -If no file name is given isam.log is used -One can give a second and a third '-v' for more verbose. -Normaly one does a update (-u). -If a recover is done all writes and all possibly updates and deletes is done -and errors are only counted. -If one gives table names as arguments only these tables will be updated - - - -.SH "SEE ALSO" -isamchk(1), -mysql(1), -mysqlaccess(1), -mysqladmin(1), -mysqld(1), -mysqld_multi(1), -mysqld_safe(1), -mysqldump(1), -mysql_fix_privilege_tables(1), -mysqlshow(1), -mysql_zap(1), -perror(1), -replace(1) -.P -For more information please refer to the MySQL reference -manual, which may already be installed locally and which -is also available online at http://www.mysql.com/doc/en/ -.SH BUGS -Please refer to http://bugs.mysql.com/ to report bugs. 
- -.SH AUTHOR - -Ver 1.0, distribution @MYSQL_NO_DASH_VERSION@ -Michael (Monty) Widenius (monty@mysql.com), -MySQL AB (http://www.mysql.com/). -This software comes with no warranty. -Manual page by L. (Kill-9) Pedersen -(kill-9@kill-9.dk), Mercurmedia Data Model Architect / -system developer (http://www.mercurmedia.com) - - -.\" end of man page - - diff --git a/myisam/mi_check.c b/myisam/mi_check.c index dcf57e78fa4..dd8cc736741 100644 --- a/myisam/mi_check.c +++ b/myisam/mi_check.c @@ -1467,12 +1467,14 @@ static int writekeys(MI_CHECK *param, register MI_INFO *info, byte *buff, if (_mi_ft_add(info,i,(char*) key,buff,filepos)) goto err; } +#ifdef HAVE_SPATIAL else if (info->s->keyinfo[i].flag & HA_SPATIAL) { uint key_length=_mi_make_key(info,i,key,buff,filepos); if (rtree_insert(info, i, key, key_length)) goto err; } +#endif /*HAVE_SPATIAL*/ else { uint key_length=_mi_make_key(info,i,key,buff,filepos); diff --git a/mysql-test/Makefile.am b/mysql-test/Makefile.am index d718935cca8..3d5833c47be 100644 --- a/mysql-test/Makefile.am +++ b/mysql-test/Makefile.am @@ -52,7 +52,7 @@ dist-hook: $(INSTALL_DATA) $(srcdir)/std_data/*.dat $(srcdir)/std_data/*.000001 $(distdir)/std_data $(INSTALL_DATA) $(srcdir)/std_data/des_key_file $(distdir)/std_data $(INSTALL_DATA) $(srcdir)/std_data/*.pem $(distdir)/std_data - + $(INSTALL_DATA) $(srcdir)/std_data/*.frm $(distdir)/std_data install-data-local: $(mkinstalldirs) \ @@ -73,6 +73,7 @@ install-data-local: $(INSTALL_DATA) $(srcdir)/std_data/des_key_file $(DESTDIR)$(testdir)/std_data $(INSTALL_DATA) $(srcdir)/std_data/Moscow_leap $(DESTDIR)$(testdir)/std_data $(INSTALL_DATA) $(srcdir)/std_data/*.pem $(DESTDIR)$(testdir)/std_data + $(INSTALL_DATA) $(srcdir)/std_data/*.frm $(DESTDIR)$(testdir)/std_data std_data/%.pem: @CP@ $(top_srcdir)/SSL/$(@F) $(srcdir)/std_data diff --git a/mysql-test/lib/init_db.sql b/mysql-test/lib/init_db.sql new file mode 100644 index 00000000000..f42f7ca6b5f --- /dev/null +++ b/mysql-test/lib/init_db.sql @@ 
-0,0 +1,54 @@ +USE mysql; + +CREATE TABLE db (Host char(60) binary DEFAULT '' NOT NULL,Db char(64) binary DEFAULT '' NOT NULL,User char(16) binary DEFAULT '' NOT NULL,Select_priv enum('N','Y') DEFAULT 'N' NOT NULL,Insert_priv enum('N','Y') DEFAULT 'N' NOT NULL,Update_priv enum('N','Y') DEFAULT 'N' NOT NULL,Delete_priv enum('N','Y') DEFAULT 'N' NOT NULL,Create_priv enum('N','Y') DEFAULT 'N' NOT NULL,Drop_priv enum('N','Y') DEFAULT 'N' NOT NULL,Grant_priv enum('N','Y') DEFAULT 'N' NOT NULL,References_priv enum('N','Y') DEFAULT 'N' NOT NULL,Index_priv enum('N','Y') DEFAULT 'N' NOT NULL,Alter_priv enum('N','Y') DEFAULT 'N' NOT NULL,Create_tmp_table_priv enum('N','Y') DEFAULT 'N' NOT NULL,Lock_tables_priv enum('N','Y') DEFAULT 'N' NOT NULL,PRIMARY KEY Host (Host,Db,User),KEY User (User)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges'; + +INSERT INTO db VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y'); +INSERT INTO db VALUES ('%','test\_%','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y'); + + +CREATE TABLE host (Host char(60) binary DEFAULT '' NOT NULL,Db char(64) binary DEFAULT '' NOT NULL,Select_priv enum('N','Y') DEFAULT 'N' NOT NULL,Insert_priv enum('N','Y') DEFAULT 'N' NOT NULL,Update_priv enum('N','Y') DEFAULT 'N' NOT NULL,Delete_priv enum('N','Y') DEFAULT 'N' NOT NULL,Create_priv enum('N','Y') DEFAULT 'N' NOT NULL,Drop_priv enum('N','Y') DEFAULT 'N' NOT NULL,Grant_priv enum('N','Y') DEFAULT 'N' NOT NULL,References_priv enum('N','Y') DEFAULT 'N' NOT NULL,Index_priv enum('N','Y') DEFAULT 'N' NOT NULL,Alter_priv enum('N','Y') DEFAULT 'N' NOT NULL,Create_tmp_table_priv enum('N','Y') DEFAULT 'N' NOT NULL,Lock_tables_priv enum('N','Y') DEFAULT 'N' NOT NULL,PRIMARY KEY Host (Host,Db)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges'; + +CREATE TABLE user (Host char(60) binary DEFAULT '' NOT NULL,User char(16) binary DEFAULT '' NOT NULL,Password char(41) 
binary DEFAULT '' NOT NULL,Select_priv enum('N','Y') DEFAULT 'N' NOT NULL,Insert_priv enum('N','Y') DEFAULT 'N' NOT NULL,Update_priv enum('N','Y') DEFAULT 'N' NOT NULL,Delete_priv enum('N','Y') DEFAULT 'N' NOT NULL,Create_priv enum('N','Y') DEFAULT 'N' NOT NULL,Drop_priv enum('N','Y') DEFAULT 'N' NOT NULL,Reload_priv enum('N','Y') DEFAULT 'N' NOT NULL,Shutdown_priv enum('N','Y') DEFAULT 'N' NOT NULL,Process_priv enum('N','Y') DEFAULT 'N' NOT NULL,File_priv enum('N','Y') DEFAULT 'N' NOT NULL,Grant_priv enum('N','Y') DEFAULT 'N' NOT NULL,References_priv enum('N','Y') DEFAULT 'N' NOT NULL,Index_priv enum('N','Y') DEFAULT 'N' NOT NULL,Alter_priv enum('N','Y') DEFAULT 'N' NOT NULL,Show_db_priv enum('N','Y') DEFAULT 'N' NOT NULL,Super_priv enum('N','Y') DEFAULT 'N' NOT NULL,Create_tmp_table_priv enum('N','Y') DEFAULT 'N' NOT NULL,Lock_tables_priv enum('N','Y') DEFAULT 'N' NOT NULL,Execute_priv enum('N','Y') DEFAULT 'N' NOT NULL,Repl_slave_priv enum('N','Y') DEFAULT 'N' NOT NULL,Repl_client_priv enum('N','Y') DEFAULT 'N' NOT NULL,ssl_type enum('','ANY','X509', 'SPECIFIED') DEFAULT '' NOT NULL,ssl_cipher BLOB NOT NULL,x509_issuer BLOB NOT NULL,x509_subject BLOB NOT NULL,max_questions int(11) unsigned DEFAULT 0 NOT NULL,max_updates int(11) unsigned DEFAULT 0 NOT NULL,max_connections int(11) unsigned DEFAULT 0 NOT NULL,PRIMARY KEY Host (Host,User)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges'; + +INSERT INTO user VALUES ('%','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0); +INSERT INTO user VALUES ('localhost','','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0); +INSERT INTO user VALUES ('%','','','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','N','','','','',0,0,0); + +CREATE TABLE func (name char(64) binary DEFAULT '' NOT NULL,ret tinyint(1) DEFAULT '0' NOT NULL,dl char(128) 
DEFAULT '' NOT NULL,type enum ('function','aggregate') NOT NULL,PRIMARY KEY (name)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User defined functions'; + +CREATE TABLE tables_priv (Host char(60) binary DEFAULT '' NOT NULL,Db char(64) binary DEFAULT '' NOT NULL,User char(16) binary DEFAULT '' NOT NULL,Table_name char(64) binary DEFAULT '' NOT NULL,Grantor char(77) DEFAULT '' NOT NULL,Timestamp timestamp(14),Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter') DEFAULT '' NOT NULL,Column_priv set('Select','Insert','Update','References') DEFAULT '' NOT NULL,PRIMARY KEY (Host,Db,User,Table_name),KEY Grantor (Grantor)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges'; + +CREATE TABLE columns_priv (Host char(60) binary DEFAULT '' NOT NULL,Db char(64) binary DEFAULT '' NOT NULL,User char(16) binary DEFAULT '' NOT NULL,Table_name char(64) binary DEFAULT '' NOT NULL,Column_name char(64) binary DEFAULT '' NOT NULL,Timestamp timestamp(14),Column_priv set('Select','Insert','Update','References') DEFAULT '' NOT NULL,PRIMARY KEY (Host,Db,User,Table_name,Column_name)) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges'; + +CREATE TABLE help_topic (help_topic_id int unsigned not null,name varchar(64) not null,help_category_id smallint unsigned not null,description text not null,example text not null,url varchar(128) not null,primary key (help_topic_id),unique index (name)) engine=MyISAM CHARACTER SET utf8 comment='help topics'; + +CREATE TABLE help_category (help_category_id smallint unsigned not null,name varchar(64) not null,parent_category_id smallint unsigned null,url varchar(128) not null,primary key (help_category_id),unique index (name)) engine=MyISAM CHARACTER SET utf8 comment='help categories'; + +CREATE TABLE help_keyword (help_keyword_id int unsigned not null,name varchar(64) not null,primary key (help_keyword_id),unique index (name)) engine=MyISAM 
CHARACTER SET utf8 comment='help keywords'; + +CREATE TABLE help_relation (help_topic_id int unsigned not null references help_topic,help_keyword_id int unsigned not null references help_keyword,primary key (help_keyword_id, help_topic_id)) engine=MyISAM CHARACTER SET utf8 comment='keyword-topic relation'; + +CREATE TABLE time_zone_name (Name char(64) NOT NULL,Time_zone_id int unsigned NOT NULL,PRIMARY KEY Name (Name)) engine=MyISAM CHARACTER SET utf8 comment='Time zone names'; + +INSERT INTO time_zone_name (Name, Time_Zone_id) VALUES ('MET', 1), ('UTC', 2), ('Universal', 2), ('Europe/Moscow',3), ('leap/Europe/Moscow',4), ('Japan', 5); + + +CREATE TABLE time_zone (Time_zone_id int unsigned NOT NULL auto_increment,Use_leap_seconds enum('Y','N') DEFAULT 'N' NOT NULL,PRIMARY KEY TzId (Time_zone_id)) engine=MyISAM CHARACTER SET utf8 comment='Time zones'; + +INSERT INTO time_zone (Time_zone_id, Use_leap_seconds) VALUES (1,'N'), (2,'N'), (3,'N'), (4,'Y'), (5,'N'); + + +CREATE TABLE time_zone_transition (Time_zone_id int unsigned NOT NULL,Transition_time bigint signed NOT NULL,Transition_type_id int unsigned NOT NULL,PRIMARY KEY TzIdTranTime (Time_zone_id, Transition_time)) engine=MyISAM CHARACTER SET utf8 comment='Time zone transitions'; + +INSERT INTO time_zone_transition (Time_zone_id, Transition_time, Transition_type_id) VALUES (1, -1693706400, 0) ,(1, -1680483600, 1),(1, -1663455600, 2) ,(1, -1650150000, 3),(1, -1632006000, 2) ,(1, -1618700400, 3),(1, -938905200, 2) ,(1, -857257200, 3),(1, -844556400, 2) ,(1, -828226800, 3),(1, -812502000, 2) ,(1, -796777200, 3),(1, 228877200, 2) ,(1, 243997200, 3),(1, 260326800, 2) ,(1, 276051600, 3),(1, 291776400, 2) ,(1, 307501200, 3),(1, 323830800, 2) ,(1, 338950800, 3),(1, 354675600, 2) ,(1, 370400400, 3),(1, 386125200, 2) ,(1, 401850000, 3),(1, 417574800, 2) ,(1, 433299600, 3),(1, 449024400, 2) ,(1, 465354000, 3),(1, 481078800, 2) ,(1, 496803600, 3),(1, 512528400, 2) ,(1, 528253200, 3),(1, 543978000, 2) ,(1, 559702800, 3),(1, 
575427600, 2) ,(1, 591152400, 3),(1, 606877200, 2) ,(1, 622602000, 3),(1, 638326800, 2) ,(1, 654656400, 3),(1, 670381200, 2) ,(1, 686106000, 3),(1, 701830800, 2) ,(1, 717555600, 3),(1, 733280400, 2) ,(1, 749005200, 3),(1, 764730000, 2) ,(1, 780454800, 3),(1, 796179600, 2) ,(1, 811904400, 3),(1, 828234000, 2) ,(1, 846378000, 3),(1, 859683600, 2) ,(1, 877827600, 3),(1, 891133200, 2) ,(1, 909277200, 3),(1, 922582800, 2) ,(1, 941331600, 3),(1, 954032400, 2) ,(1, 972781200, 3),(1, 985482000, 2) ,(1, 1004230800, 3),(1, 1017536400, 2) ,(1, 1035680400, 3),(1, 1048986000, 2) ,(1, 1067130000, 3),(1, 1080435600, 2) ,(1, 1099184400, 3),(1, 1111885200, 2) ,(1, 1130634000, 3),(1, 1143334800, 2) ,(1, 1162083600, 3),(1, 1174784400, 2) ,(1, 1193533200, 3),(1, 1206838800, 2) ,(1, 1224982800, 3),(1, 1238288400, 2) ,(1, 1256432400, 3),(1, 1269738000, 2) ,(1, 1288486800, 3),(1, 1301187600, 2) ,(1, 1319936400, 3),(1, 1332637200, 2) ,(1, 1351386000, 3),(1, 1364691600, 2) ,(1, 1382835600, 3),(1, 1396141200, 2) ,(1, 1414285200, 3),(1, 1427590800, 2) ,(1, 1445734800, 3),(1, 1459040400, 2) ,(1, 1477789200, 3),(1, 1490490000, 2) ,(1, 1509238800, 3),(1, 1521939600, 2) ,(1, 1540688400, 3),(1, 1553994000, 2) ,(1, 1572138000, 3),(1, 1585443600, 2) ,(1, 1603587600, 3),(1, 1616893200, 2) ,(1, 1635642000, 3),(1, 1648342800, 2) ,(1, 1667091600, 3),(1, 1679792400, 2) ,(1, 1698541200, 3),(1, 1711846800, 2) ,(1, 1729990800, 3),(1, 1743296400, 2) ,(1, 1761440400, 3),(1, 1774746000, 2) ,(1, 1792890000, 3),(1, 1806195600, 2) ,(1, 1824944400, 3),(1, 1837645200, 2) ,(1, 1856394000, 3),(1, 1869094800, 2) ,(1, 1887843600, 3),(1, 1901149200, 2) ,(1, 1919293200, 3),(1, 1932598800, 2) ,(1, 1950742800, 3),(1, 1964048400, 2) ,(1, 1982797200, 3),(1, 1995498000, 2) ,(1, 2014246800, 3),(1, 2026947600, 2) ,(1, 2045696400, 3),(1, 2058397200, 2) ,(1, 2077146000, 3),(1, 2090451600, 2) ,(1, 2108595600, 3),(1, 2121901200, 2) ,(1, 2140045200, 3),(3, -1688265000, 2) ,(3, -1656819048, 1),(3, -1641353448, 2) ,(3, -1627965048, 
3),(3, -1618716648, 1) ,(3, -1596429048, 3),(3, -1593829848, 5) ,(3, -1589860800, 4),(3, -1542427200, 5) ,(3, -1539493200, 6),(3, -1525323600, 5) ,(3, -1522728000, 4),(3, -1491188400, 7) ,(3, -1247536800, 4),(3, 354920400, 5) ,(3, 370728000, 4),(3, 386456400, 5) ,(3, 402264000, 4),(3, 417992400, 5) ,(3, 433800000, 4),(3, 449614800, 5) ,(3, 465346800, 8),(3, 481071600, 9) ,(3, 496796400, 8),(3, 512521200, 9) ,(3, 528246000, 8),(3, 543970800, 9) ,(3, 559695600, 8),(3, 575420400, 9) ,(3, 591145200, 8),(3, 606870000, 9) ,(3, 622594800, 8),(3, 638319600, 9) ,(3, 654649200, 8),(3, 670374000, 10) ,(3, 686102400, 11),(3, 695779200, 8) ,(3, 701812800, 5),(3, 717534000, 4) ,(3, 733273200, 9),(3, 748998000, 8) ,(3, 764722800, 9),(3, 780447600, 8) ,(3, 796172400, 9),(3, 811897200, 8) ,(3, 828226800, 9),(3, 846370800, 8) ,(3, 859676400, 9),(3, 877820400, 8) ,(3, 891126000, 9),(3, 909270000, 8) ,(3, 922575600, 9),(3, 941324400, 8) ,(3, 954025200, 9),(3, 972774000, 8) ,(3, 985474800, 9),(3, 1004223600, 8) ,(3, 1017529200, 9),(3, 1035673200, 8) ,(3, 1048978800, 9),(3, 1067122800, 8) ,(3, 1080428400, 9),(3, 1099177200, 8) ,(3, 1111878000, 9),(3, 1130626800, 8) ,(3, 1143327600, 9),(3, 1162076400, 8) ,(3, 1174777200, 9),(3, 1193526000, 8) ,(3, 1206831600, 9),(3, 1224975600, 8) ,(3, 1238281200, 9),(3, 1256425200, 8) ,(3, 1269730800, 9),(3, 1288479600, 8) ,(3, 1301180400, 9),(3, 1319929200, 8) ,(3, 1332630000, 9),(3, 1351378800, 8) ,(3, 1364684400, 9),(3, 1382828400, 8) ,(3, 1396134000, 9),(3, 1414278000, 8) ,(3, 1427583600, 9),(3, 1445727600, 8) ,(3, 1459033200, 9),(3, 1477782000, 8) ,(3, 1490482800, 9),(3, 1509231600, 8) ,(3, 1521932400, 9),(3, 1540681200, 8) ,(3, 1553986800, 9),(3, 1572130800, 8) ,(3, 1585436400, 9),(3, 1603580400, 8) ,(3, 1616886000, 9),(3, 1635634800, 8) ,(3, 1648335600, 9),(3, 1667084400, 8) ,(3, 1679785200, 9),(3, 1698534000, 8) ,(3, 1711839600, 9),(3, 1729983600, 8) ,(3, 1743289200, 9),(3, 1761433200, 8) ,(3, 1774738800, 9),(3, 1792882800, 8) ,(3, 1806188400, 
9),(3, 1824937200, 8) ,(3, 1837638000, 9),(3, 1856386800, 8) ,(3, 1869087600, 9),(3, 1887836400, 8) ,(3, 1901142000, 9),(3, 1919286000, 8) ,(3, 1932591600, 9),(3, 1950735600, 8) ,(3, 1964041200, 9),(3, 1982790000, 8) ,(3, 1995490800, 9),(3, 2014239600, 8) ,(3, 2026940400, 9),(3, 2045689200, 8) ,(3, 2058390000, 9),(3, 2077138800, 8) ,(3, 2090444400, 9),(3, 2108588400, 8) ,(3, 2121894000, 9),(3, 2140038000, 8),(4, -1688265000, 2) ,(4, -1656819048, 1),(4, -1641353448, 2) ,(4, -1627965048, 3),(4, -1618716648, 1) ,(4, -1596429048, 3),(4, -1593829848, 5) ,(4, -1589860800, 4),(4, -1542427200, 5) ,(4, -1539493200, 6),(4, -1525323600, 5) ,(4, -1522728000, 4),(4, -1491188400, 7) ,(4, -1247536800, 4),(4, 354920409, 5) ,(4, 370728010, 4),(4, 386456410, 5) ,(4, 402264011, 4),(4, 417992411, 5) ,(4, 433800012, 4),(4, 449614812, 5) ,(4, 465346812, 8),(4, 481071612, 9) ,(4, 496796413, 8),(4, 512521213, 9) ,(4, 528246013, 8),(4, 543970813, 9) ,(4, 559695613, 8),(4, 575420414, 9) ,(4, 591145214, 8),(4, 606870014, 9) ,(4, 622594814, 8),(4, 638319615, 9) ,(4, 654649215, 8),(4, 670374016, 10) ,(4, 686102416, 11),(4, 695779216, 8) ,(4, 701812816, 5),(4, 717534017, 4) ,(4, 733273217, 9),(4, 748998018, 8) ,(4, 764722818, 9),(4, 780447619, 8) ,(4, 796172419, 9),(4, 811897219, 8) ,(4, 828226820, 9),(4, 846370820, 8) ,(4, 859676420, 9),(4, 877820421, 8) ,(4, 891126021, 9),(4, 909270021, 8) ,(4, 922575622, 9),(4, 941324422, 8) ,(4, 954025222, 9),(4, 972774022, 8) ,(4, 985474822, 9),(4, 1004223622, 8) ,(4, 1017529222, 9),(4, 1035673222, 8) ,(4, 1048978822, 9),(4, 1067122822, 8) ,(4, 1080428422, 9),(4, 1099177222, 8) ,(4, 1111878022, 9),(4, 1130626822, 8) ,(4, 1143327622, 9),(4, 1162076422, 8) ,(4, 1174777222, 9),(4, 1193526022, 8) ,(4, 1206831622, 9),(4, 1224975622, 8) ,(4, 1238281222, 9),(4, 1256425222, 8) ,(4, 1269730822, 9),(4, 1288479622, 8) ,(4, 1301180422, 9),(4, 1319929222, 8) ,(4, 1332630022, 9),(4, 1351378822, 8) ,(4, 1364684422, 9),(4, 1382828422, 8) ,(4, 1396134022, 9),(4, 
1414278022, 8) ,(4, 1427583622, 9),(4, 1445727622, 8) ,(4, 1459033222, 9),(4, 1477782022, 8) ,(4, 1490482822, 9),(4, 1509231622, 8) ,(4, 1521932422, 9),(4, 1540681222, 8) ,(4, 1553986822, 9),(4, 1572130822, 8) ,(4, 1585436422, 9),(4, 1603580422, 8) ,(4, 1616886022, 9),(4, 1635634822, 8) ,(4, 1648335622, 9),(4, 1667084422, 8) ,(4, 1679785222, 9),(4, 1698534022, 8) ,(4, 1711839622, 9),(4, 1729983622, 8) ,(4, 1743289222, 9),(4, 1761433222, 8) ,(4, 1774738822, 9),(4, 1792882822, 8) ,(4, 1806188422, 9),(4, 1824937222, 8) ,(4, 1837638022, 9),(4, 1856386822, 8) ,(4, 1869087622, 9),(4, 1887836422, 8) ,(4, 1901142022, 9),(4, 1919286022, 8) ,(4, 1932591622, 9),(4, 1950735622, 8) ,(4, 1964041222, 9),(4, 1982790022, 8) ,(4, 1995490822, 9),(4, 2014239622, 8) ,(4, 2026940422, 9),(4, 2045689222, 8) ,(4, 2058390022, 9),(4, 2077138822, 8) ,(4, 2090444422, 9),(4, 2108588422, 8) ,(4, 2121894022, 9),(4, 2140038022, 8); + + +CREATE TABLE time_zone_transition_type (Time_zone_id int unsigned NOT NULL,Transition_type_id int unsigned NOT NULL,Offset int signed DEFAULT 0 NOT NULL,Is_DST tinyint unsigned DEFAULT 0 NOT NULL,Abbreviation char(8) DEFAULT '' NOT NULL,PRIMARY KEY TzIdTrTId (Time_zone_id, Transition_type_id)) engine=MyISAM CHARACTER SET utf8 comment='Time zone transition types'; + +INSERT INTO time_zone_transition_type (Time_zone_id,Transition_type_id, Offset, Is_DST, Abbreviation) VALUES (1, 0, 7200, 1, 'MEST') ,(1, 1, 3600, 0, 'MET') ,(1, 2, 7200, 1, 'MEST') ,(1, 3, 3600, 0, 'MET') ,(2, 0, 0, 0, 'UTC') ,(3, 0, 9000, 0, 'MMT') ,(3, 1, 12648, 1, 'MST') ,(3, 2, 9048, 0, 'MMT') ,(3, 3, 16248, 1, 'MDST') ,(3, 4, 10800, 0, 'MSK') ,(3, 5, 14400, 1, 'MSD') ,(3, 6, 18000, 1, 'MSD') ,(3, 7, 7200, 0, 'EET') ,(3, 8, 10800, 0, 'MSK') ,(3, 9, 14400, 1, 'MSD') ,(3, 10, 10800, 1, 'EEST') ,(3, 11, 7200, 0, 'EET') ,(4, 0, 9000, 0, 'MMT') ,(4, 1, 12648, 1, 'MST') ,(4, 2, 9048, 0, 'MMT') ,(4, 3, 16248, 1, 'MDST') ,(4, 4, 10800, 0, 'MSK') ,(4, 5, 14400, 1, 'MSD') ,(4, 6, 18000, 1, 'MSD') ,(4, 7, 
7200, 0, 'EET') ,(4, 8, 10800, 0, 'MSK') ,(4, 9, 14400, 1, 'MSD') ,(4, 10, 10800, 1, 'EEST') ,(4, 11, 7200, 0, 'EET') ,(5, 0, 32400, 0, 'CJT') ,(5, 1, 32400, 0, 'JST'); + +CREATE TABLE time_zone_leap_second (Transition_time bigint signed NOT NULL,Correction int signed NOT NULL,PRIMARY KEY TranTime (Transition_time)) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones'; + +INSERT INTO time_zone_leap_second (Transition_time, Correction) VALUES (78796800, 1) ,(94694401, 2) ,(126230402, 3) ,(157766403, 4) ,(189302404, 5) ,(220924805, 6) ,(252460806, 7) ,(283996807, 8) ,(315532808, 9) ,(362793609, 10) ,(394329610, 11) ,(425865611, 12) ,(489024012, 13) ,(567993613, 14) ,(631152014, 15) ,(662688015, 16) ,(709948816, 17) ,(741484817, 18) ,(773020818, 19) ,(820454419, 20) ,(867715220, 21) ,(915148821, 22); + + diff --git a/mysql-test/lib/mtr_gcov.pl b/mysql-test/lib/mtr_gcov.pl new file mode 100644 index 00000000000..07aac1d2017 --- /dev/null +++ b/mysql-test/lib/mtr_gcov.pl @@ -0,0 +1,44 @@ +# -*- cperl -*- + +# This is a library file used by the Perl version of mysql-test-run, +# and is part of the translation of the Bourne shell script with the +# same name. 
+ +use strict; + +# These are not to be prefixed with "mtr_" + +sub gcov_prepare (); +sub gcov_collect (); + +############################################################################## +# +# +# +############################################################################## + +sub gcov_prepare () { + + `find $::glob_basedir -name \*.gcov \ + -or -name \*.da | xargs rm`; +} + +sub gcov_collect () { + + print "Collecting source coverage info...\n"; + -f $::opt_gcov_msg and unlink($::opt_gcov_msg); + -f $::opt_gcov_err and unlink($::opt_gcov_err); + foreach my $d ( @::mysqld_src_dirs ) + { + chdir("$::glob_basedir/$d"); + foreach my $f ( (glob("*.h"), glob("*.cc"), glob("*.c")) ) + { + `$::opt_gcov $f 2>>$::opt_gcov_err >>$::opt_gcov_msg`; + } + chdir($::glob_mysql_test_dir); + } + print "gcov info in $::opt_gcov_msg, errors in $::opt_gcov_err\n"; +} + + +1; diff --git a/mysql-test/lib/mtr_gprof.pl b/mysql-test/lib/mtr_gprof.pl new file mode 100644 index 00000000000..cc874eebfe5 --- /dev/null +++ b/mysql-test/lib/mtr_gprof.pl @@ -0,0 +1,50 @@ +# -*- cperl -*- + +# This is a library file used by the Perl version of mysql-test-run, +# and is part of the translation of the Bourne shell script with the +# same name. + +use strict; + +# These are not to be prefixed with "mtr_" + +sub gprof_prepare (); +sub gprof_collect (); + +############################################################################## +# +# +# +############################################################################## + +sub gprof_prepare () { + + rmtree($::opt_gprof_dir); + mkdir($::opt_gprof_dir); +} + +# FIXME what about master1 and slave1?! +sub gprof_collect () { + + if ( -f "$::master->[0]->{'path_myddir'}/gmon.out" ) + { + # FIXME check result code?! 
# ---------------------------------------------------------------------------
# NOTE(review): this chunk is part of a unified diff.  The lines below, up
# to the first "1;", are the tail of a gprof reporting routine in
# lib/mtr_gprof.pl whose opening lines fall outside this chunk; they are
# preserved here as an inert comment so no content is lost:
#
#     mtr_run("gprof",
#             [$::exe_master_mysqld,
#              "$::master->[0]->{'path_myddir'}/gmon.out"],
#             $::opt_gprof_master, "", "", "");
#     print "Master execution profile has been saved in $::opt_gprof_master\n";
#   }
#   if ( -f "$::slave->[0]->{'path_myddir'}/gmon.out" )
#   {
#     # FIXME check result code?!
#     mtr_run("gprof",
#             [$::exe_slave_mysqld,
#              "$::slave->[0]->{'path_myddir'}/gmon.out"],
#             $::opt_gprof_slave, "", "", "");
#     print "Slave execution profile has been saved in $::opt_gprof_slave\n";
#   }
# }
#
# 1;
#
# diff --git a/mysql-test/lib/mtr_io.pl b/mysql-test/lib/mtr_io.pl
# new file mode 100644
# index 00000000000..14ea37dbb75
# --- /dev/null
# +++ b/mysql-test/lib/mtr_io.pl
# @@ -0,0 +1,71 @@
# ---------------------------------------------------------------------------

# -*- cperl -*-

# This is a library file used by the Perl version of mysql-test-run,
# and is part of the translation of the Bourne shell script with the
# same name.

use strict;

sub mtr_get_pid_from_file ($);
sub mtr_get_opts_from_file ($);
sub mtr_tofile ($@);
sub mtr_tonewfile($@);

##############################################################################
#
#  File input/output helpers.
#
#  BUGFIX(review): the readline operators ("<FILE>") in the three reading
#  functions below had been stripped from this text (angle brackets eaten
#  by markup-stripping tooling), leaving invalid Perl such as
#  "my $pid= ;" and "while ( )".  They are restored here.
#
##############################################################################

# Read a process id from a PID file.  Returns the first line of the file
# with the trailing newline removed.  Dies via mtr_error() (defined in
# mtr_report.pl) if the file cannot be opened.

sub mtr_get_pid_from_file ($) {
  my $file= shift;

  open(FILE,"<",$file) or mtr_error("can't open file \"$file\": $!");
  my $pid= <FILE>;              # restored readline
  chomp($pid);
  close FILE;
  return $pid;
}

# Read server options from a file, one or more per line, expanding
# $MYSQL_TEST_DIR to the actual test directory.  Returns a reference
# to the list of option words.

sub mtr_get_opts_from_file ($) {
  my $file= shift;

  open(FILE,"<",$file) or mtr_error("can't open file \"$file\": $!");
  my @args;
  while ( <FILE> )              # restored readline
  {
    chomp;
    s/\$MYSQL_TEST_DIR/$::glob_mysql_test_dir/g;
    push(@args, split(' ', $_));
  }
  close FILE;
  return \@args;
}

# Slurp a whole file and return its contents as one string.

sub mtr_fromfile ($) {
  my $file= shift;

  open(FILE,"<",$file) or mtr_error("can't open file \"$file\": $!");
  my $text= join('', <FILE>);   # restored readline
  close FILE;
  return $text;
}

# Append the given strings to a file, creating it if needed.

sub mtr_tofile ($@) {
  my $file= shift;

  open(FILE,">>",$file) or mtr_error("can't open file \"$file\": $!");
  print FILE join("", @_);
  close FILE;
}

# Write the given strings to a new file, truncating any previous contents.

sub mtr_tonewfile ($@) {
  my $file= shift;

  open(FILE,">",$file) or mtr_error("can't open file \"$file\": $!");
  print FILE join("", @_);
  close FILE;
}


1;

# diff --git a/mysql-test/lib/mtr_match.pl b/mysql-test/lib/mtr_match.pl
# new file mode 100644
# index 00000000000..eb5de655520
# --- /dev/null
# +++ b/mysql-test/lib/mtr_match.pl
# @@ -0,0 +1,67 @@

# -*- cperl -*-

# This is a library file used by the Perl version of mysql-test-run,
# and is part of the translation of the Bourne shell script with the
# same name.

use strict;

sub mtr_match_prefix ($$);
sub mtr_match_extension ($$);
sub mtr_match_any_exact ($$);

##############################################################################
#
#  Simple string matching helpers, deliberately written so they can later
#  be translated to strncmp()/strcmp() in C.
#
##############################################################################

# Match a prefix and return what is after the prefix, or undef (NULL in
# the C translation) when $string does not start with $prefix.

sub mtr_match_prefix ($$) {
  my $string= shift;
  my $prefix= shift;

  if ( $string =~ /^\Q$prefix\E(.*)$/ ) # strncmp
  {
    return $1;
  }
  else
  {
    return undef;               # NULL
  }
}


# Match extension and return the name without extension, or undef when
# $file does not end in ".$ext".

sub mtr_match_extension ($$) {
  my $file= shift;
  my $ext=  shift;

  if ( $file =~ /^(.*)\.\Q$ext\E$/ ) # strchr+strcmp or something
  {
    return $1;
  }
  else
  {
    return undef;               # NULL
  }
}


# Return 1 if $string is exactly equal to any element of the array
# referenced by $mlist, else 0.

sub mtr_match_any_exact ($$) {
  my $string= shift;
  my $mlist=  shift;

  foreach my $m (@$mlist)
  {
    if ( $string eq $m )
    {
      return 1;
    }
  }
  return 0;
}

1;

# diff --git a/mysql-test/lib/mtr_misc.pl b/mysql-test/lib/mtr_misc.pl
# new file mode 100644
# index 00000000000..5f80864d1f7
# --- /dev/null
# +++ b/mysql-test/lib/mtr_misc.pl
# @@ -0,0 +1,50 @@

# -*- cperl -*-

# This is a library file used by the Perl version of mysql-test-run,
# and is part of the translation of the Bourne shell script with the
# same name.
use strict;

sub mtr_full_hostname ();
sub mtr_init_args ($);
sub mtr_add_arg ($$@);

##############################################################################
#
# Misc
#
##############################################################################

# We want the fully qualified host name and hostname() may have returned
# only the short name. So we use the resolver to find out.
#
# NOTE(review): relies on Sys::Hostname (hostname) and Socket (AF_INET)
# being loaded by the main mysql-test-run.pl script — confirm when this
# file is used stand-alone.

sub mtr_full_hostname () {

  my $hostname= hostname();
  if ( $hostname !~ /\./ )
  {
    my $address= gethostbyname($hostname)
      or die "Couldn't resolve $hostname : $!";
    my $fullname= gethostbyaddr($address, AF_INET);
    $hostname= $fullname if $fullname;
  }
  return $hostname;
}

# FIXME move to own lib

# Initialize an argument list: the scalar referenced by $args is set to
# a reference to a new empty array.

sub mtr_init_args ($) {
  my $args= shift;
  $$args= [];                   # Empty list
}

# Append one sprintf()-formatted argument to the list created by
# mtr_init_args().
#
# BUGFIX(review): the prototype was ($$), but callers (e.g. in
# mtr_process.pl) pass printf-style varargs such as
#     mtr_add_arg($args, "--socket=%s", $sock)
# Wherever a ($$) prototype is visible at compile time such calls are
# rejected with "Too many arguments".  Changed to ($$@), which stays
# backward compatible with every existing two-argument call.

sub mtr_add_arg ($$@) {
  my $args=   shift;
  my $format= shift;
  my @fargs=  @_;

  push(@$args, sprintf($format, @fargs));
}

1;

# diff --git a/mysql-test/lib/mtr_process.pl b/mysql-test/lib/mtr_process.pl
# new file mode 100644
# index 00000000000..8c584802b8e
# --- /dev/null
# +++ b/mysql-test/lib/mtr_process.pl
# @@ -0,0 +1,467 @@

# -*- cperl -*-

# This is a library file used by the Perl version of mysql-test-run,
# and is part of the translation of the Bourne shell script with the
# same name.
use Carp qw(cluck);
use strict;

use POSIX ":sys_wait_h";

sub mtr_run ($$$$$$);
sub mtr_spawn ($$$$$$);
sub mtr_stop_mysqld_servers ($$);
sub mtr_kill_leftovers ();

# static in C
sub spawn_impl ($$$$$$$);

##############################################################################
#
#  Execute an external command
#
##############################################################################

# This function try to mimic the C version used in "netware/mysql_test_run.c"
# FIXME learn it to handle append mode as well, a "new" flag or a "append"

# Run $path with the given argument list and wait for it to finish;
# returns the child's exit value.  $input/$output/$error are file names
# for redirection ("" means none); $pid_file is accepted but unused so far.

sub mtr_run ($$$$$$) {
  my $path=       shift;
  my $arg_list_t= shift;
  my $input=      shift;
  my $output=     shift;
  my $error=      shift;
  my $pid_file=   shift;

  return spawn_impl($path,$arg_list_t,1,$input,$output,$error,$pid_file);
}

# Same as mtr_run() but does not wait for the child; returns its PID.

sub mtr_spawn ($$$$$$) {
  my $path=       shift;
  my $arg_list_t= shift;
  my $input=      shift;
  my $output=     shift;
  my $error=      shift;
  my $pid_file=   shift;

  return spawn_impl($path,$arg_list_t,0,$input,$output,$error,$pid_file);
}


##############################################################################
#
#  If $join is set, we return the error code, else we return the PID
#
##############################################################################

sub spawn_impl ($$$$$$$) {
  my $path=       shift;
  my $arg_list_t= shift;
  my $join=       shift;
  my $input=      shift;
  my $output=     shift;
  my $error=      shift;
  my $pid_file=   shift;        # FIXME

  # FIXME really needing a PATH???
  # $ENV{'PATH'}= "/bin:/usr/bin:/usr/local/bin:/usr/bsd:/usr/X11R6/bin:/usr/openwin/bin:/usr/bin/X11:$ENV{'PATH'}";

  $ENV{'TZ'}= "GMT-3";          # for UNIX_TIMESTAMP tests to work
  $ENV{'LC_COLLATE'}= "C";
  $ENV{'MYSQL_TEST_DIR'}= $::glob_mysql_test_dir;
  $ENV{'MASTER_MYPORT'}= $::opt_master_myport;
  $ENV{'SLAVE_MYPORT'}= $::opt_slave_myport;
# $ENV{'MYSQL_TCP_PORT'}= '@MYSQL_TCP_PORT@'; # FIXME
  $ENV{'MYSQL_TCP_PORT'}= 3306;
  $ENV{'MASTER_MYSOCK'}= $::master->[0]->{'path_mysock'};

  if ( $::opt_script_debug )
  {
    print STDERR "\n";
    print STDERR "#### ", "-" x 78, "\n";
    print STDERR "#### ", "STDIN $input\n" if $input;
    print STDERR "#### ", "STDOUT $output\n" if $output;
    print STDERR "#### ", "STDERR $error\n" if $error;
    if ( $join )
    {
      print STDERR "#### ", "run";
    }
    else
    {
      print STDERR "#### ", "spawn";
    }
    print STDERR "$path ", join(" ",@$arg_list_t), "\n";
    print STDERR "#### ", "-" x 78, "\n";
  }

  my $pid= fork();

  if ( $pid )
  {
    # Parent, i.e. the main script
    if ( $join )
    {
      # We run a command and wait for the result
      # FIXME this need to be improved
      waitpid($pid,0);
      my $exit_value=  $? >> 8;
      my $signal_num=  $? & 127;
      my $dumped_core= $? & 128;
      if ( $signal_num )
      {
        mtr_error("spawn got signal $signal_num");
      }
      if ( $dumped_core )
      {
        mtr_error("spawn dumped core");
      }
      return $exit_value;
    }
    else
    {
      # We spawned a process we don't wait for
      return $pid;
    }
  }
  else
  {
    # Child, redirect output and exec
    # FIXME I tried POSIX::setsid() here to detach and, I hoped,
    # avoid zombies. But everything went wild, somehow the parent
    # became a deamon as well, and was hard to kill ;-)
    # Need to catch SIGCHLD and do waitpid or something instead......

    $SIG{INT}= 'DEFAULT';       # Parent do some stuff, we don't

    if ( $output )
    {
      if ( ! open(STDOUT,">",$output) )
      {
        mtr_error("can't redirect STDOUT to \"$output\": $!");
      }
    }
    if ( $error )
    {
      if ( $output eq $error )
      {
        if ( ! open(STDERR,">&STDOUT") )
        {
          mtr_error("can't dup STDOUT: $!");
        }
      }
      else
      {
        if ( ! open(STDERR,">",$error) )
        {
          # BUGFIX(review): this message used to interpolate $output,
          # naming the wrong file; it now reports $error.
          mtr_error("can't redirect STDERR to \"$error\": $!");
        }
      }
    }
    if ( $input )
    {
      if ( ! open(STDIN,"<",$input) )
      {
        mtr_error("can't redirect STDIN to \"$input\": $!");
      }
    }
    exec($path,@$arg_list_t);
  }
}

##############################################################################
#
#  Kill processes left from previous runs
#
##############################################################################

sub mtr_kill_leftovers () {

  # First, kill all masters and slaves that would conflict with
  # this run. Make sure to remove the PID file, if any.

  my @args;

  # BUGFIX(review): both loops below used "for ( my $idx; ... )", leaving
  # $idx uninitialized (worked only via undef==0, with warnings enabled it
  # complains on every iteration).  Initialize to 0 explicitly.
  for ( my $idx= 0; $idx < 2; $idx++ )
  {
    push(@args,{
                pid      => 0,  # We don't know the PID
                pidfile  => $::master->[$idx]->{'path_mypid'},
                sockfile => $::master->[$idx]->{'path_mysock'},
                port     => $::master->[$idx]->{'path_myport'},
               });
  }

  for ( my $idx= 0; $idx < 3; $idx++ )
  {
    push(@args,{
                pid      => 0,  # We don't know the PID
                pidfile  => $::slave->[$idx]->{'path_mypid'},
                sockfile => $::slave->[$idx]->{'path_mysock'},
                port     => $::slave->[$idx]->{'path_myport'},
               });
  }

  mtr_stop_mysqld_servers(\@args, 1);

  # We scan the "var/run/" directory for other process id's to kill
  my $rundir= "$::glob_mysql_test_dir/var/run"; # FIXME $path_run_dir or something

  if ( -d $rundir )
  {
    opendir(RUNDIR, $rundir)
      or mtr_error("can't open directory \"$rundir\": $!");

    my @pids;

    while ( my $elem= readdir(RUNDIR) )
    {
      my $pidfile= "$rundir/$elem";

      if ( -f $pidfile )
      {
        my $pid= mtr_get_pid_from_file($pidfile);
        if ( ! unlink($pidfile) )
        {
          mtr_error("can't remove $pidfile");
        }
        push(@pids, $pid);
      }
    }
    closedir(RUNDIR);

    start_reap_all();

    if ( $::glob_cygwin_perl )
    {
      # We have no (easy) way of knowing the Cygwin controlling
      # process, in the PID file we only have the Windows process id.
      system("kill -f " . join(" ",@pids)); # Hope for the best....
    }
    else
    {
      my $retries= 10;          # 10 seconds
      do
      {
        kill(9, @pids);
        # BUGFIX(review): the comment above promises "10 seconds", but the
        # loop used to spin through its retries without pausing; sleep
        # between attempts while anything is still alive.
        sleep(1) if kill(0, @pids);
      } while ( $retries-- and kill(0, @pids) );

      if ( kill(0, @pids) )
      {
        mtr_error("can't kill processes " . join(" ", @pids));
      }
    }

    stop_reap_all();
  }
}

##############################################################################
#
#  Shut down mysqld servers
#
##############################################################################

# To speed things we kill servers in parallel.
# The argument is a list of 'pidfiles' and 'socketfiles'.
# We use the pidfiles and socketfiles to try to terminate the servers.
# This is not perfect, there could still be other server processes
# left.

# Force flag is to be set only for killing mysqld servers this script
# didn't create in this run, i.e. initial cleanup before we start working.
# If force flag is set, we try to kill all with mysqladmin, and
# give up if we have no PIDs.

# FIXME On some operating systems, $srv->{'pid'} and $srv->{'pidfile'}
# will not be the same PID. We need to try to kill both I think.

sub mtr_stop_mysqld_servers ($$) {
  my $spec=  shift;
  my $force= shift;

  # ----------------------------------------------------------------------
  # If the process was not started from this file, we got no PID,
  # we try to find it in the PID file.
  # ----------------------------------------------------------------------

  my $any_pid= 0;               # If we have any PIDs

  foreach my $srv ( @$spec )
  {
    if ( ! $srv->{'pid'} and -f $srv->{'pidfile'} )
    {
      $srv->{'pid'}= mtr_get_pid_from_file($srv->{'pidfile'});
    }
    if ( $srv->{'pid'} )
    {
      $any_pid= 1;
    }
  }

  # If the processes where started from this script, and we know
  # no PIDs, then we don't have to do anything.

  if ( ! $any_pid and ! $force )
  {
    # cluck "This is how we got here!";
    return;
  }

  # ----------------------------------------------------------------------
  # First try nice normal shutdown using 'mysqladmin'
  # ----------------------------------------------------------------------

  start_reap_all();             # Don't require waitpid() of children

  foreach my $srv ( @$spec )
  {
    if ( -e $srv->{'sockfile'} or $srv->{'port'} )
    {
      # FIXME wrong log.....
      # FIXME, stderr.....
      # Shutdown time must be high as slave may be in reconnect
      my $args;

      mtr_init_args(\$args);

      mtr_add_arg($args, "--no-defaults");
      mtr_add_arg($args, "-uroot");
      if ( -e $srv->{'sockfile'} )
      {
        mtr_add_arg($args, "--socket=%s", $srv->{'sockfile'});
      }
      if ( $srv->{'port'} )
      {
        mtr_add_arg($args, "--port=%s", $srv->{'port'});
      }
      mtr_add_arg($args, "--connect_timeout=5");
      mtr_add_arg($args, "--shutdown_timeout=70");
      mtr_add_arg($args, "shutdown");
      # We don't wait for termination of mysqladmin
      mtr_spawn($::exe_mysqladmin, $args,
                "", $::path_manager_log, $::path_manager_log, "");
    }
  }

  # Wait for them all to remove their pid and socket file

 PIDSOCKFILEREMOVED:
  for (my $loop= $::opt_sleep_time_for_delete; $loop; $loop--)
  {
    my $pidsockfiles_left= 0;
    foreach my $srv ( @$spec )
    {
      if ( -e $srv->{'sockfile'} or -f $srv->{'pidfile'} )
      {
        $pidsockfiles_left++;   # Could be that pidfile is left
      }
    }
    if ( ! $pidsockfiles_left )
    {
      last PIDSOCKFILEREMOVED;
    }
    mtr_debug("Sleep for 1 second waiting for pid and socket file removal");
    sleep(1);                   # One second
  }

  # ----------------------------------------------------------------------
  # If no known PIDs, we have nothing more to try
  # ----------------------------------------------------------------------

  if ( ! $any_pid )
  {
    stop_reap_all();
    return;
  }

  # ----------------------------------------------------------------------
  # We may have killed all that left a socket, but we are not sure we got
  # them all killed. If we suspect it lives, try nice kill with SIG_TERM.
  # Note that for true Win32 processes, kill(0,$pid) will not return 1.
  # ----------------------------------------------------------------------

 SIGNAL:
  foreach my $sig (15,9)
  {
    my $process_left= 0;
    foreach my $srv ( @$spec )
    {
      if ( $srv->{'pid'} and
           ( -f $srv->{'pidfile'} or kill(0,$srv->{'pid'}) ) )
      {
        $process_left++;
        mtr_warning("process $srv->{'pid'} not cooperating, " .
                    "will send signal $sig to process");
        kill($sig,$srv->{'pid'}); # SIG_TERM
      }
    }
    # BUGFIX(review): this check used to sit *inside* the foreach above,
    # so the loop bailed out as soon as the first server in the list was
    # already dead, without even signalling the remaining ones.  It now
    # runs after all servers have been examined.
    if ( ! $process_left )
    {
      last SIGNAL;
    }
    mtr_debug("Sleep for 5 seconds waiting for processes to die");
    sleep(5);                   # We wait longer than usual
  }

  # ----------------------------------------------------------------------
  # Now, we check if all we can find using kill(0,$pid) are dead,
  # and just assume the rest are. We cleanup socket and PID files.
  # ----------------------------------------------------------------------

  {
    my $errors= 0;
    foreach my $srv ( @$spec )
    {
      if ( $srv->{'pid'} )
      {
        if ( kill(0,$srv->{'pid'}) )
        {
          # FIXME In Cygwin there seem to be some fast reuse
          # of PIDs, so dying may not be the right thing to do.
          $errors++;
          mtr_warning("can't kill process $srv->{'pid'}");
        }
        else
        {
          # We managed to kill it at last
          # FIXME In Cygwin, we will get here even if the process lives.

          # Not needed as we know the process is dead, but to be safe
          # we unlink and check success in two steps. We first unlink
          # without checking the error code, and then check if the
          # file still exists.

          foreach my $file ($srv->{'pidfile'}, $srv->{'sockfile'})
          {
            unlink($file);
            if ( -e $file )
            {
              $errors++;
              mtr_warning("couldn't delete $file");
            }
          }
        }
      }
    }
    if ( $errors )
    {
      # We are in trouble, just die....
      mtr_error("we could not kill or clean up all processes");
    }
  }

  stop_reap_all();

  # FIXME We just assume they are all dead, we don't know....
}

# Stop requiring waitpid() of children: reap them automatically.
sub start_reap_all {
  $SIG{CHLD}= 'IGNORE';         # FIXME is this enough?
}

# Restore normal child handling.
sub stop_reap_all {
  $SIG{CHLD}= 'DEFAULT';
}

1;

# diff --git a/mysql-test/lib/mtr_report.pl b/mysql-test/lib/mtr_report.pl
# new file mode 100644
# index 00000000000..0f75fc1341a
# --- /dev/null
# +++ b/mysql-test/lib/mtr_report.pl
# @@ -0,0 +1,262 @@

# -*- cperl -*-

# This is a library file used by the Perl version of mysql-test-run,
# and is part of the translation of the Bourne shell script with the
# same name.
use strict;

sub mtr_report_test_name($);
sub mtr_report_test_passed($);
sub mtr_report_test_failed($);
sub mtr_report_test_skipped($);

sub mtr_show_failed_diff ($);
sub mtr_report_stats ($);
sub mtr_print_line ();
sub mtr_print_thick_line ();
sub mtr_print_header ();
sub mtr_report (@);
sub mtr_warning (@);
sub mtr_error (@);
sub mtr_debug (@);


##############################################################################
#
#  Test result reporting.
#
##############################################################################

# We can't use diff -u or diff -a as these are not portable

# Show the diff between the expected result file and the reject file
# produced by a failing test, with pointers to the bug-reporting docs.

sub mtr_show_failed_diff ($) {
  my $tname= shift;

  my $reject_file= "r/$tname.reject";
  my $result_file= "r/$tname.result";
  my $eval_file=   "r/$tname.eval";

  if ( -f $eval_file )
  {
    $result_file= $eval_file;
  }
  elsif ( $::opt_result_ext and
          ( $::opt_record or -f "$result_file$::opt_result_ext" ))
  {
    # If we have an special extension for result files we use it if we are
    # recording or a result file with that extension exists.
    $result_file= "$result_file$::opt_result_ext";
  }

  if ( -f $reject_file )
  {
    print "Below are the diffs between actual and expected results:\n";
    print "-------------------------------------------------------\n";
    # FIXME check result code?!
    mtr_run("diff",["-c",$result_file,$reject_file], "", "", "", "");
    print "-------------------------------------------------------\n";
    print "Please follow the instructions outlined at\n";
    print "http://www.mysql.com/doc/en/Reporting_mysqltest_bugs.html\n";
    print "to find the reason to this problem and how to report this.\n\n";
  }
}

# Print the test name, padded so the result column lines up.

sub mtr_report_test_name ($) {
  my $tinfo= shift;

  printf "%-31s ", $tinfo->{'name'};
}

# Mark the test skipped and report it.

sub mtr_report_test_skipped ($) {
  my $tinfo= shift;

  $tinfo->{'result'}= 'MTR_RES_SKIPPED';
  print "[ skipped ]\n";
}

# Mark the test passed and report it.

sub mtr_report_test_passed ($) {
  my $tinfo= shift;

  my $timer= "";
# FIXME
#  if ( $::opt_timer and -f "$::glob_mysql_test_dir/var/log/timer" )
#  {
#    $timer= `cat var/log/timer`;
#    $timer= sprintf "%13s", $timer;
#  }
  $tinfo->{'result'}= 'MTR_RES_PASSED';
  print "[ pass ]   $timer\n";
}

# Mark the test failed, report it, and show the tail of the mysqltest log.

sub mtr_report_test_failed ($) {
  my $tinfo= shift;

  $tinfo->{'result'}= 'MTR_RES_FAILED';
  print "[ fail ]\n";

  print "Errors are (from $::path_timefile) :\n";
  print mtr_fromfile($::path_timefile); # FIXME print_file() instead
  print "\n(the last lines may be the most important ones)\n";
}

# Summarize all test results, print the report, and die if any test failed.

sub mtr_report_stats ($) {
  my $tests= shift;

  # ----------------------------------------------------------------------
  # Find out how we were doing
  # ----------------------------------------------------------------------

  my $tot_skipped= 0;           # (review: was misspelled "$tot_skiped")
  my $tot_passed= 0;
  my $tot_failed= 0;
  my $tot_tests=  0;

  foreach my $tinfo (@$tests)
  {
    if ( $tinfo->{'result'} eq 'MTR_RES_SKIPPED' )
    {
      $tot_skipped++;
    }
    elsif ( $tinfo->{'result'} eq 'MTR_RES_PASSED' )
    {
      $tot_tests++;
      $tot_passed++;
    }
    elsif ( $tinfo->{'result'} eq 'MTR_RES_FAILED' )
    {
      $tot_tests++;
      $tot_failed++;
    }
  }

  # ----------------------------------------------------------------------
  # Print out a summary report to screen
  # ----------------------------------------------------------------------

  if ( ! $tot_failed )
  {
    print "All $tot_tests tests were successful.\n";
  }
  else
  {
    my $ratio= $tot_passed * 100 / $tot_tests;
    # BUGFIX(review): the format used "\%"; in a double-quoted string that
    # is just '%', so printf saw a broken "% s" conversion and ate a
    # missing argument.  A literal percent in a printf format is "%%".
    printf "Failed $tot_failed/$tot_tests tests, " .
           "%.2f%% successful.\n\n", $ratio;
    print
      "The log files in var/log may give you some hint\n",
      "of what went wrong.\n",  # BUGFIX(review): was "what when wrong"
      "If you want to report this error, please read first ",
      "the documentation at\n",
      "http://www.mysql.com/doc/en/MySQL_test_suite.html\n";
  }

  # ----------------------------------------------------------------------
  # ----------------------------------------------------------------------

  if ( ! $::glob_use_running_server )
  {

    # Report if there was any fatal warnings/errors in the log files
    #
    unlink("$::glob_mysql_test_dir/var/log/warnings");
    unlink("$::glob_mysql_test_dir/var/log/warnings.tmp");
    # Remove some non fatal warnings from the log files

# FIXME what is going on ????? ;-)
#    sed -e 's!Warning:  Table:.* on delete!!g' -e 's!Warning: Setting lower_case_table_names=2!!g' -e 's!Warning: One can only use the --user.*root!!g' \
#        var/log/*.err \
#        |  sed -e 's!Warning:  Table:.* on rename!!g' \
#        > var/log/warnings.tmp;
#
#    found_error=0;
#    # Find errors
#    for i in "^Warning:" "^Error:" "^==.* at 0x"
#    do
#      if ( $GREP "$i" var/log/warnings.tmp >> var/log/warnings )
#      {
#        found_error=1
#      }
#    done
#    unlink("$::glob_mysql_test_dir/var/log/warnings.tmp");
#    if ( $found_error=  "1" )
#    {
#      print "WARNING: Got errors/warnings while running tests. Please examine\n"
#      print "$::glob_mysql_test_dir/var/log/warnings for details.\n"
#    }
#  }
  }

  print "\n";

  if ( $tot_failed != 0 )
  {
    print "mysql-test-run: *** Failing the test(s):";

    foreach my $tinfo (@$tests)
    {
      if ( $tinfo->{'result'} eq 'MTR_RES_FAILED' )
      {
        print " $tinfo->{'name'}";
      }
    }
    print "\n";
    # BUGFIX(review): message read "there where failing test cases"
    mtr_error("there were failing test cases");
  }
}

##############################################################################
#
#  Text formatting
#
##############################################################################

sub mtr_print_line () {
  print '-' x 55, "\n";
}

sub mtr_print_thick_line () {
  print '=' x 55, "\n";
}

sub mtr_print_header () {
  print "\n";
  if ( $::opt_timer )
  {
    print "TEST                            RESULT        TIME (ms)\n";
  }
  else
  {
    print "TEST                            RESULT\n";
  }
  mtr_print_line();
  print "\n";
}


##############################################################################
#
#  Misc
#
##############################################################################

# Plain progress message to stdout.
sub mtr_report (@) {
  print join(" ", @_),"\n";
}

# Non-fatal warning to stderr.
sub mtr_warning (@) {
  print STDERR "mysql-test-run: WARNING: ",join(" ", @_),"\n";
}

# Fatal error: print and die.
sub mtr_error (@) {
  die "mysql-test-run: *** ERROR: ",join(" ", @_),"\n";
}

# Debug trace, only when --script-debug is given.
sub mtr_debug (@) {
  if ( $::opt_script_debug )
  {
    print STDERR "####: ",join(" ", @_),"\n";
  }
}

1;

# ---------------------------------------------------------------------------
# NOTE(review): the remainder of this chunk is a unified-diff fragment for
# mysql-test/my_manage.c (C code, ends mid-hunk at the chunk boundary);
# preserved as a comment so the surrounding patch content is not lost:
#
# diff --git a/mysql-test/my_manage.c b/mysql-test/my_manage.c
# index 472b0d32683..1f006f7ab90 100644
# --- a/mysql-test/my_manage.c
# +++ b/mysql-test/my_manage.c
# @@ -327,7 +327,7 @@ int spawn(char *path, arg_list_t *al, int join, char *input,
#  int spawn(char *path, arg_list_t *al, int join, char *input,
#            char *output, char *error, HANDLE *pid)
#  {
# -  intptr_t result;
# +  bool result;
#    int i;
#    STARTUPINFO startup_info;
#    PROCESS_INFORMATION process_information;
# @@ -665,7 +665,11 @@ void del_tree(char *dir)
#    rmdir(dir);
#  #else
#    struct _finddata_t parent;
# +#if defined(_MSC_VER) && _MSC_VER > 1200
#    intptr_t handle;
# ---------------------------------------------------------------------------
+#else + long handle; +#endif /* _MSC_VER && _MSC_VER > 1200 */ char temp[FN_REFLEN]; char mask[FN_REFLEN]; @@ -728,7 +732,11 @@ int removef(const char *format, ...) va_list ap; char path[FN_REFLEN]; struct _finddata_t parent; +#if defined(_MSC_VER) && _MSC_VER > 1200 intptr_t handle; +#else + long handle; +#endif /* _MSC_VER && _MSC_VER > 1200 */ char temp[FN_REFLEN]; char *p; diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl new file mode 100755 index 00000000000..01729aa1018 --- /dev/null +++ b/mysql-test/mysql-test-run.pl @@ -0,0 +1,2182 @@ +#!/usr/bin/perl +# -*- cperl -*- + +# This is a transformation of the "mysql-test-run" Bourne shell script +# to Perl. This is just an intermediate step, the goal is to rewrite +# the Perl script to C. The complexity of the mysql-test-run script +# makes it a bit hard to write and debug it as a C program directly, +# so this is considered a prototype. +# +# Because of this the Perl coding style may in some cases look a bit +# funny. The rules used are +# +# - The coding style is as close as possible to the C/C++ MySQL +# coding standard. +# +# - Where NULL is to be returned, the undefined value is used. +# +# - Regexp comparisons are simple and can be translated to strcmp +# and other string functions. To ease this transformation matching +# is done in the lib "lib/mtr_match.pl", i.e. regular expressions +# should be avoided in the main program. +# +# - The "unless" construct is not to be used. It is the same as "if !". +# +# - opendir/readdir/closedir is used instead of glob()/<*>. +# +# - All lists of arguments to send to commands are Perl lists/arrays, +# not strings we append args to. Within reason, most string +# concatenation for arguments should be avoided. +# +# - sprintf() is to be used, within reason, for all string creation. +# This mtr_add_arg() function is also based on sprintf(), i.e. you +# use a format string and put the variable argument in the argument +# list. 
+# +# - Functions defined in the main program are not to be prefixed, +# functions in "library files" are to be prefixed with "mtr_" (for +# Mysql-Test-Run). There are some exceptions, code that fits best in +# the main program, but are put into separate files to avoid +# clutter, may be without prefix. +# +# - All stat/opendir/-f/ is to be kept in collect_test_cases(). It +# will create a struct that the rest of the program can use to get +# the information. This separates the "find information" from the +# "do the work" and makes the program more easy to maintain. +# +# - At the moment, there are tons of "global" variables that control +# this script, even accessed from the files in "lib/*.pl". This +# will change over time, for now global variables are used instead +# of using %opt, %path and %exe hashes, because I want more +# compile time checking, that hashes would not give me. Once this +# script is debugged, hashes will be used and passed as parameters +# to functions, to more closely mimic how it would be coded in C +# using structs. +# +# - The rule when it comes to the logic of this program is +# +# command_line_setup() - is to handle the logic between flags +# collect_test_cases() - is to do its best to select what tests +# to run, dig out options, if needs restart etc. +# run_testcase() - is to run a single testcase, and follow the +# logic set in both above. No, or rare file +# system operations. If a test seems complex, +# it should probably not be here. +# +# A nice way to trace the execution of this script while debugging +# is to use the Devel::Trace package found at +# "http://www.plover.com/~mjd/perl/Trace/" and run this script like +# "perl -d:Trace mysql-test-run.pl" +# +# FIXME Save a PID file from this code as well, to record the process +# id we think it has. In Cygwin, a fork creates one Cygwin process, +# and then the real Win32 process. Cygwin Perl can only kill Cygwin +# processes. And "mysqld --bootstrap ..." doesn't save a PID file. 
+ +$Devel::Trace::TRACE= 0; # Don't trace boring init stuff + +#require 5.6.1; +use File::Path; +use File::Basename; +use Cwd; +use Getopt::Long; +use Sys::Hostname; +#use Carp; +use IO::Socket; +use IO::Socket::INET; +use Data::Dumper; +use strict; +#use diagnostics; + +require "lib/mtr_process.pl"; +require "lib/mtr_io.pl"; +require "lib/mtr_gcov.pl"; +require "lib/mtr_gprof.pl"; +require "lib/mtr_report.pl"; +require "lib/mtr_match.pl"; +require "lib/mtr_misc.pl"; + +$Devel::Trace::TRACE= 1; + +my @skip_if_embedded_server= + ( + "alter_table", + "bdb-deadlock", + "connect", + "flush_block_commit", + "grant2", + "grant_cache", + "grant", + "init_connect", + "innodb-deadlock", + "innodb-lock", + "mix_innodb_myisam_binlog", + "mysqlbinlog2", + "mysqlbinlog", + "mysqldump", + "mysql_protocols", + "ps_1general", + "rename", + "show_check", + "system_mysql_db_fix", + "user_var", + "variables", + ); + +# Used by gcov +our @mysqld_src_dirs= + ( + "strings", + "mysys", + "include", + "extra", + "regex", + "isam", + "merge", + "myisam", + "myisammrg", + "heap", + "sql", + ); + +############################################################################## +# +# Default settings +# +############################################################################## + +# We are to use handle_options() in "mysys/my_getopt.c" for the C version +# +# In the C version we want to use structs and, in some cases, arrays of +# structs. We let each struct be a separate hash. 
+ +# Misc global variables + +our $glob_win32= 0; # OS and native Win32 executables +our $glob_win32_perl= 0; # ActiveState Win32 Perl +our $glob_cygwin_perl= 0; # Cygwin Perl +our $glob_mysql_test_dir= undef; +our $glob_mysql_bench_dir= undef; +our $glob_hostname= undef; +our $glob_scriptname= undef; +our $glob_use_running_server= 0; +our $glob_use_running_ndbcluster= 0; +our $glob_user= 'test'; +our $glob_use_embedded_server= 0; + +our $glob_basedir; +our $glob_do_test; + +# The total result + +our $path_charsetsdir; +our $path_client_bindir; +our $path_language; +our $path_tests_bindir; +our $path_timefile; +our $path_manager_log; # Used by mysqldadmin +our $path_slave_load_tmpdir; # What is this?! +our $path_my_basedir; +our $opt_tmpdir; # A path but set directly on cmd line + +our $opt_usage; +our $opt_suite; + +our $opt_netware; + +our $opt_script_debug= 0; # Script debugging, enable with --script-debug + +# Options FIXME not all.... + +our $exe_master_mysqld; +our $exe_mysql; +our $exe_mysqladmin; +our $exe_mysqlbinlog; +our $exe_mysqld; +our $exe_mysqldump; # Called from test case +our $exe_mysqltest; +our $exe_slave_mysqld; + +our $opt_bench= 0; +our $opt_small_bench= 0; +our $opt_big_test= 0; # Send --big-test to mysqltest + +our $opt_extra_mysqld_opt; # FIXME not handled + +our $opt_compress; +our $opt_current_test; +our $opt_ddd; +our $opt_debug; +our $opt_do_test; +our $opt_embedded_server; +our $opt_extern; +our $opt_fast; +our $opt_force; + +our $opt_gcov; +our $opt_gcov_err; +our $opt_gcov_msg; + +our $opt_gdb; +our $opt_client_gdb; +our $opt_manual_gdb; + +our $opt_gprof; +our $opt_gprof_dir; +our $opt_gprof_master; +our $opt_gprof_slave; + +our $opt_local; +our $opt_local_master; + +our $master; # Will be struct in C +our $slave; + +our $opt_ndbcluster_port; +our $opt_ndbconnectstring; + +our $opt_no_manager; # Does nothing now, we never use manager + +our $opt_old_master; + +our $opt_record; + +our $opt_result_ext; + +our $opt_skip; +our 
$opt_skip_rpl; +our $opt_skip_test; + +our $opt_sleep; + +our $opt_ps_protocol; + +# FIXME all of the sleep time handling needs cleanup +our $opt_sleep_time_after_restart= 1; +our $opt_sleep_time_for_delete= 10; +our $opt_sleep_time_for_first_master= 400; # enough time create innodb tables +our $opt_sleep_time_for_second_master= 400; +our $opt_sleep_time_for_first_slave= 400; +our $opt_sleep_time_for_second_slave= 30; + +our $opt_socket; + +our $opt_source_dist; + +our $opt_start_and_exit; +our $opt_start_from; + +our $opt_strace_client; + +our $opt_timer; + + +our $opt_user_test; + +our $opt_valgrind; +our $opt_valgrind_all; +our $opt_valgrind_options; + +our $opt_verbose; + +our $opt_wait_for_master; +our $opt_wait_for_slave; +our $opt_wait_timeout= 10; + +our $opt_warnings; + +our $opt_with_ndbcluster; +our $opt_with_openssl; + + +###################################################################### +# +# Function declarations +# +###################################################################### + +sub main (); +sub initial_setup (); +sub command_line_setup (); +sub executable_setup (); +sub kill_and_cleanup (); +sub collect_test_cases ($); +sub sleep_until_file_created ($$); +sub ndbcluster_start (); +sub ndbcluster_stop (); +sub run_benchmarks ($); +sub run_tests (); +sub mysql_install_db (); +sub install_db ($$); +sub run_testcase ($); +sub do_before_start_master ($$); +sub do_before_start_slave ($$); +sub mysqld_start ($$$$); +sub mysqld_arguments ($$$$$); +sub stop_masters_slaves (); +sub stop_masters (); +sub stop_slaves (); +sub run_mysqltest ($$); +sub usage ($); + +###################################################################### +# +# Main program +# +###################################################################### + +main(); + +sub main () { + + initial_setup(); + command_line_setup(); + executable_setup(); + signal_setup(); + + if ( $opt_gcov ) + { + gcov_prepare(); + } + + if ( $opt_gprof ) + { + gprof_prepare(); + } + + if ( ! 
$glob_use_running_server ) + { + kill_and_cleanup(); + mysql_install_db(); + + if ( $opt_with_ndbcluster and ! $glob_use_running_ndbcluster ) + { + ndbcluster_start(); # We start the cluster storage engine + } + +# mysql_loadstd(); FIXME copying from "std_data" .frm and +# .MGR but there are none?! + } + + if ( $opt_start_and_exit ) + { + mtr_report("Servers started, exiting"); + } + else + { + if ( $opt_bench ) + { + run_benchmarks(shift); # Shift what? Extra arguments?! + } + else + { + run_tests(); + } + } + + exit(0); +} + +############################################################################## +# +# Initial setup independent on command line arguments +# +############################################################################## + +sub initial_setup () { + + select(STDOUT); + $| = 1; # Make unbuffered + + $glob_scriptname= basename($0); + + $glob_win32_perl= ($^O eq "MSWin32"); + $glob_cygwin_perl= ($^O eq "cygwin"); + $glob_win32= ($glob_win32_perl or $glob_cygwin_perl); + + # We require that we are in the "mysql-test" directory + # to run mysql-test-run + + if (! -f $glob_scriptname) + { + mtr_error("Can't find the location for the mysql-test-run script\n" . + "Go to to the mysql-test directory and execute the script " . + "as follows:\n./$glob_scriptname"); + } + + if ( -d "../sql" ) + { + $opt_source_dist= 1; + } + + $glob_hostname= mtr_full_hostname(); + + # 'basedir' is always parent of "mysql-test" directory + $glob_mysql_test_dir= cwd(); + if ( $glob_cygwin_perl ) + { + # Windows programs like 'mysqld' needs Windows paths + $glob_mysql_test_dir= `cygpath -m $glob_mysql_test_dir`; + chomp($glob_mysql_test_dir); + } + $glob_basedir= dirname($glob_mysql_test_dir); + $glob_mysql_bench_dir= "$glob_basedir/mysql-bench"; # FIXME make configurable + + $path_timefile= "$glob_mysql_test_dir/var/log/mysqltest-time"; + + # needs to be same length to test logging (FIXME what???) 
+ $path_slave_load_tmpdir= "../../var/tmp"; + + $path_my_basedir= + $opt_source_dist ? $glob_mysql_test_dir : $glob_basedir; +} + + + +############################################################################## +# +# Default settings +# +############################################################################## + +sub command_line_setup () { + + # These are defaults for things that are set on the command line + + $opt_suite= "main"; # Special default suite + $opt_tmpdir= "$glob_mysql_test_dir/var/tmp"; + # FIXME maybe unneded? + $path_manager_log= "$glob_mysql_test_dir/var/log/manager.log"; + $opt_current_test= "$glob_mysql_test_dir/var/log/current_test"; + + my $opt_master_myport= 9306; + my $opt_slave_myport= 9308; + $opt_ndbcluster_port= 9350; + $opt_sleep_time_for_delete= 10; + + my $opt_user; + + # Read the command line + # Note: Keep list, and the order, in sync with usage at end of this file + + GetOptions( + # Control what engine/variation to run + 'embedded-server' => \$opt_embedded_server, + 'ps-protocol' => \$opt_ps_protocol, + 'bench' => \$opt_bench, + 'small-bench' => \$opt_small_bench, + 'no-manager' => \$opt_no_manager, + + # Control what test suites or cases to run + 'force' => \$opt_force, + 'with-ndbcluster' => \$opt_with_ndbcluster, + 'do-test=s' => \$opt_do_test, + 'suite=s' => \$opt_suite, + 'skip-rpl' => \$opt_skip_rpl, + 'skip-test=s' => \$opt_skip_test, + + # Specify ports + 'master_port=i' => \$opt_master_myport, + 'slave_port=i' => \$opt_slave_myport, + 'ndbcluster_port=i' => \$opt_ndbcluster_port, + + # Test case authoring + 'record' => \$opt_record, + + # ??? 
+ 'mysqld=s' => \$opt_extra_mysqld_opt, + + # Run test on running server + 'extern' => \$opt_extern, + 'ndbconnectstring=s' => \$opt_ndbconnectstring, + + # Debugging + 'gdb' => \$opt_gdb, + 'manual-gdb' => \$opt_manual_gdb, + 'client-gdb' => \$opt_client_gdb, + 'ddd' => \$opt_ddd, + 'strace-client' => \$opt_strace_client, + 'master-binary=s' => \$exe_master_mysqld, + 'slave-binary=s' => \$exe_slave_mysqld, + + # Coverage, profiling etc + 'gcov' => \$opt_gcov, + 'gprof' => \$opt_gprof, + 'valgrind' => \$opt_valgrind, + 'valgrind-all' => \$opt_valgrind_all, + 'valgrind-options=s' => \$opt_valgrind_options, + + # Misc + 'big-test' => \$opt_big_test, + 'compress' => \$opt_compress, + 'debug' => \$opt_debug, + 'fast' => \$opt_fast, + 'local' => \$opt_local, + 'local-master' => \$opt_local_master, + 'netware' => \$opt_netware, + 'old-master' => \$opt_old_master, + 'script-debug' => \$opt_script_debug, + 'sleep=i' => \$opt_sleep, + 'socket=s' => \$opt_socket, + 'start-and-exit' => \$opt_start_and_exit, + 'start-from=s' => \$opt_start_from, + 'timer' => \$opt_timer, + 'tmpdir=s' => \$opt_tmpdir, + 'user-test=s' => \$opt_user_test, + 'user=s' => \$opt_user, + 'verbose' => \$opt_verbose, + 'wait-timeout=i' => \$opt_wait_timeout, + 'warnings|log-warnings' => \$opt_warnings, + 'with-openssl' => \$opt_with_openssl, + + 'help|h' => \$opt_usage, + ) or usage("Can't read options"); + + if ( $opt_usage ) + { + usage(""); + } + + # Put this into a hash, will be a C struct + + $master->[0]->{'path_myddir'}= "$glob_mysql_test_dir/var/master-data"; + $master->[0]->{'path_myerr'}= "$glob_mysql_test_dir/var/log/master.err"; + $master->[0]->{'path_mylog'}= "$glob_mysql_test_dir/var/log/master.log"; + $master->[0]->{'path_mypid'}= "$glob_mysql_test_dir/var/run/master.pid"; + $master->[0]->{'path_mysock'}= "$opt_tmpdir/master.sock"; + $master->[0]->{'path_myport'}= $opt_master_myport; + + $master->[1]->{'path_myddir'}= "$glob_mysql_test_dir/var/master1-data"; + 
$master->[1]->{'path_myerr'}= "$glob_mysql_test_dir/var/log/master1.err"; + $master->[1]->{'path_mylog'}= "$glob_mysql_test_dir/var/log/master1.log"; + $master->[1]->{'path_mypid'}= "$glob_mysql_test_dir/var/run/master1.pid"; + $master->[1]->{'path_mysock'}= "$opt_tmpdir/master1.sock"; + $master->[1]->{'path_myport'}= $opt_master_myport + 1; + + $slave->[0]->{'path_myddir'}= "$glob_mysql_test_dir/var/slave-data"; + $slave->[0]->{'path_myerr'}= "$glob_mysql_test_dir/var/log/slave.err"; + $slave->[0]->{'path_mylog'}= "$glob_mysql_test_dir/var/log/slave.log"; + $slave->[0]->{'path_mypid'}= "$glob_mysql_test_dir/var/run/slave.pid"; + $slave->[0]->{'path_mysock'}= "$opt_tmpdir/slave.sock"; + $slave->[0]->{'path_myport'}= $opt_slave_myport; + + $slave->[1]->{'path_myddir'}= "$glob_mysql_test_dir/var/slave1-data"; + $slave->[1]->{'path_myerr'}= "$glob_mysql_test_dir/var/log/slave1.err"; + $slave->[1]->{'path_mylog'}= "$glob_mysql_test_dir/var/log/slave1.log"; + $slave->[1]->{'path_mypid'}= "$glob_mysql_test_dir/var/run/slave1.pid"; + $slave->[1]->{'path_mysock'}= "$opt_tmpdir/slave1.sock"; + $slave->[1]->{'path_myport'}= $opt_slave_myport + 1; + + $slave->[2]->{'path_myddir'}= "$glob_mysql_test_dir/var/slave2-data"; + $slave->[2]->{'path_myerr'}= "$glob_mysql_test_dir/var/log/slave2.err"; + $slave->[2]->{'path_mylog'}= "$glob_mysql_test_dir/var/log/slave2.log"; + $slave->[2]->{'path_mypid'}= "$glob_mysql_test_dir/var/run/slave2.pid"; + $slave->[2]->{'path_mysock'}= "$opt_tmpdir/slave2.sock"; + $slave->[2]->{'path_myport'}= $opt_slave_myport + 2; + + # Do sanity checks of command line arguments + + if ( $opt_extern and $opt_local ) + { + mtr_error("Can't use --extern and --local at the same time"); + } + + if ( ! $opt_socket ) + { # FIXME set default before reading options? 
+# $opt_socket= '@MYSQL_UNIX_ADDR@'; + $opt_socket= "/tmp/mysql.sock"; # FIXME + } + + if ( $opt_extern ) + { + $glob_use_running_server= 1; + $opt_skip_rpl= 1; # We don't run rpl test cases + $master->[0]->{'path_mysock'}= $opt_socket; + } + + # -------------------------------------------------------------------------- + # Set LD_LIBRARY_PATH if we are using shared libraries + # -------------------------------------------------------------------------- + $ENV{'LD_LIBRARY_PATH'}= + "$glob_basedir/lib:$glob_basedir/libmysql/.libs" . + ($ENV{'LD_LIBRARY_PATH'} ? ":$ENV{'LD_LIBRARY_PATH'}" : ""); + $ENV{'DYLD_LIBRARY_PATH'}= + "$glob_basedir/lib:$glob_basedir/libmysql/.libs" . + ($ENV{'DYLD_LIBRARY_PATH'} ? ":$ENV{'DYLD_LIBRARY_PATH'}" : ""); + + # -------------------------------------------------------------------------- + # Look at the command line options and set script flags + # -------------------------------------------------------------------------- + + if ( $opt_record and ! @ARGV) + { + mtr_error("Will not run in record mode without a specific test case"); + } + + if ( $opt_embedded_server ) + { + $glob_use_embedded_server= 1; + $opt_skip_rpl= 1; # We never run replication with embedded + + if ( $opt_extern ) + { + mtr_error("Can't use --extern with --embedded-server"); + } + $opt_result_ext= ".es"; + } + + # FIXME don't understand what this is +# if ( $opt_local_master ) +# { +# $opt_master_myport= 3306; +# } + + if ( $opt_small_bench ) + { + $opt_bench= 1; + } + + if ( $opt_sleep ) + { + $opt_sleep_time_after_restart= $opt_sleep; + } + + if ( $opt_gcov and ! $opt_source_dist ) + { + mtr_error("Coverage test needs the source - please use source dist"); + } + + if ( $glob_use_embedded_server and ! 
$opt_source_dist ) + { + mtr_error("Embedded server needs source tree - please use source dist"); + } + + if ( $opt_gdb ) + { + $opt_wait_timeout= 300; + if ( $opt_extern ) + { + mtr_error("Can't use --extern with --gdb"); + } + } + + if ( $opt_manual_gdb ) + { + $opt_gdb= 1; + if ( $opt_extern ) + { + mtr_error("Can't use --extern with --manual-gdb"); + } + } + + if ( $opt_ddd ) + { + if ( $opt_extern ) + { + mtr_error("Can't use --extern with --ddd"); + } + } + + if ( $opt_ndbconnectstring ) + { + $glob_use_running_ndbcluster= 1; + $opt_with_ndbcluster= 1; + } + + # FIXME + + #if ( $opt_valgrind or $opt_valgrind_all ) + #{ + # VALGRIND=`which valgrind` # this will print an error if not found FIXME + # Give good warning to the user and stop + # if ( ! $VALGRIND ) + # { + # print "You need to have the 'valgrind' program in your PATH to run mysql-test-run with option --valgrind. Valgrind's home page is http://valgrind.kde.org.\n" + # exit 1 + # } + # >=2.1.2 requires the --tool option, some versions write to stdout, some to stderr + # valgrind --help 2>&1 | grep "\-\-tool" > /dev/null && VALGRIND="$VALGRIND --tool=memcheck" + # VALGRIND="$VALGRIND --alignment=8 --leak-check=yes --num-callers=16" + # $opt_extra_mysqld_opt.= " --skip-safemalloc --skip-bdb"; + # SLEEP_TIME_AFTER_RESTART=10 + # $opt_sleep_time_for_delete= 60 + # $glob_use_running_server= "" + # if ( "$1"= "--valgrind-all" ) + # { + # VALGRIND="$VALGRIND -v --show-reachable=yes" + # } + #} + + if ( $opt_user ) + { + $glob_user= $opt_user; + } + elsif ( $glob_use_running_server ) + { + $glob_user= "test"; + } + else + { + $glob_user= "root"; # We want to do FLUSH xxx commands + } + +} + + +############################################################################## +# +# Set paths to various executable programs +# +############################################################################## + +sub executable_setup () { + + if ( $opt_source_dist ) + { + if ( $glob_use_embedded_server ) + { + if ( -f 
"$glob_basedir/libmysqld/examples/mysqltest" ) + { + $exe_mysqltest= "$glob_basedir/libmysqld/examples/mysqltest"; + } + else + { + mtr_error("Cannot find embedded server 'mysqltest'"); + } + $path_tests_bindir= "$glob_basedir/libmysqld/examples"; + } + else + { + if ( -f "$glob_basedir/client/.libs/lt-mysqltest" ) + { + $exe_mysqltest= "$glob_basedir/client/.libs/lt-mysqltest"; + } + elsif ( -f "$glob_basedir/client/.libs/mysqltest" ) + { + $exe_mysqltest= "$glob_basedir/client/.libs/mysqltest"; + } + else + { + $exe_mysqltest= "$glob_basedir/client/mysqltest"; + } + $path_tests_bindir= "$glob_basedir/tests"; + } + if ( -f "$glob_basedir/client/.libs/mysqldump" ) + { + $exe_mysqldump= "$glob_basedir/client/.libs/mysqldump"; + } + else + { + $exe_mysqldump= "$glob_basedir/client/mysqldump"; + } + if ( -f "$glob_basedir/client/.libs/mysqlbinlog" ) + { + $exe_mysqlbinlog= "$glob_basedir/client/.libs/mysqlbinlog"; + } + else + { + $exe_mysqlbinlog= "$glob_basedir/client/mysqlbinlog"; + } + + $exe_mysqld= "$glob_basedir/sql/mysqld"; + $path_client_bindir= "$glob_basedir/client"; + $exe_mysqladmin= "$path_client_bindir/mysqladmin"; + $exe_mysql= "$path_client_bindir/mysql"; + $path_language= "$glob_basedir/sql/share/english/"; + $path_charsetsdir= "$glob_basedir/sql/share/charsets"; + } + else + { + $path_client_bindir= "$glob_basedir/bin"; + $path_tests_bindir= "$glob_basedir/tests"; + $exe_mysqltest= "$path_client_bindir/mysqltest"; + $exe_mysqldump= "$path_client_bindir/mysqldump"; + $exe_mysqlbinlog= "$path_client_bindir/mysqlbinlog"; + $exe_mysqladmin= "$path_client_bindir/mysqladmin"; + $exe_mysql= "$path_client_bindir/mysql"; + if ( -d "$glob_basedir/share/mysql/english" ) + { + $path_language ="$glob_basedir/share/mysql/english/"; + $path_charsetsdir ="$glob_basedir/share/mysql/charsets"; + } + else + { + $path_language ="$glob_basedir/share/english/"; + $path_charsetsdir ="$glob_basedir/share/charsets"; + } + + if ( -x "$glob_basedir/libexec/mysqld" ) + { + 
$exe_mysqld= "$glob_basedir/libexec/mysqld"; + } + else + { + $exe_mysqld= "$glob_basedir/bin/mysqld"; + } + + } + + # FIXME special $exe_master_mysqld and $exe_slave_mysqld + # are not used that much.... + + if ( ! $exe_master_mysqld ) + { + $exe_master_mysqld= $exe_mysqld; + } + + if ( ! $exe_slave_mysqld ) + { + $exe_slave_mysqld= $exe_mysqld; + } +} + + +############################################################################## +# +# If we get a ^C, we try to clean up before termination +# +############################################################################## +# FIXME check restrictions what to do in a signal handler + +sub signal_setup () { + $SIG{INT}= \&handle_int_signal; +} + +sub handle_int_signal () { + $SIG{INT}= 'DEFAULT'; # If we get a ^C again, we die... + mtr_warning("got INT signal, cleaning up....."); + stop_masters_slaves(); + mtr_error("We die from ^C signal from user"); +} + + +############################################################################## +# +# Collect information about test cases we are to run +# +############################################################################## + +sub collect_test_cases ($) { + my $suite= shift; # Test suite name + + my $testdir; + my $resdir; + + if ( $suite eq "main" ) + { + $testdir= "$glob_mysql_test_dir/t"; + $resdir= "$glob_mysql_test_dir/r"; + } + else + { + $testdir= "$glob_mysql_test_dir/suite/$suite/t"; + $resdir= "$glob_mysql_test_dir/suite/$suite/r"; + } + + my @tests; # Array of hash, will be array of C struct + + opendir(TESTDIR, $testdir) or mtr_error("Can't open dir \"$testdir\": $!"); + + foreach my $elem ( sort readdir(TESTDIR) ) { + my $tname= mtr_match_extension($elem,"test"); + next if ! defined $tname; + next if $opt_do_test and ! 
defined mtr_match_prefix($elem,$opt_do_test); + my $path= "$testdir/$elem"; + + # ---------------------------------------------------------------------- + # Skip some tests silently + # ---------------------------------------------------------------------- + + if ( $opt_start_from and $tname lt $opt_start_from ) + { + next; + } + + # ---------------------------------------------------------------------- + # Skip some tests but include in list, just mark them to skip + # ---------------------------------------------------------------------- + + my $tinfo= {}; + $tinfo->{'name'}= $tname; + $tinfo->{'result_file'}= "$resdir/$tname.result"; + push(@tests, $tinfo); + + if ( $opt_skip_test and defined mtr_match_prefix($tname,$opt_skip_test) ) + { + $tinfo->{'skip'}= 1; + next; + } + + # FIXME temporary solution, we have a hard coded list of test cases to + # skip if we are using the embedded server + + if ( $glob_use_embedded_server and + mtr_match_any_exact($tname,\@skip_if_embedded_server) ) + { + $tinfo->{'skip'}= 1; + next; + } + + # ---------------------------------------------------------------------- + # Collect information about test case + # ---------------------------------------------------------------------- + + $tinfo->{'path'}= $path; + + if ( defined mtr_match_prefix($tname,"rpl") ) + { + if ( $opt_skip_rpl ) + { + $tinfo->{'skip'}= 1; + next; + } + + # FIXME currently we always restart slaves + $tinfo->{'slave_restart'}= 1; + + if ( $tname eq 'rpl_failsafe' or $tname eq 'rpl_chain_temp_table' ) + { + $tinfo->{'slave_num'}= 3; + } + else + { + $tinfo->{'slave_num'}= 1; + } + } + + # FIXME what about embedded_server + ndbcluster, skip ?! 
+ + my $master_opt_file= "$testdir/$tname-master.opt"; + my $slave_opt_file= "$testdir/$tname-slave.opt"; + my $slave_mi_file= "$testdir/$tname.slave-mi"; + my $master_sh= "$testdir/$tname-master.sh"; + my $slave_sh= "$testdir/$tname-slave.sh"; + + if ( -f $master_opt_file ) + { + $tinfo->{'master_restart'}= 1; # We think so for now + # This is a dirty hack from old mysql-test-run, we use the opt file + # to flag other things as well, it is not a opt list at all + my $extra_master_opt= mtr_get_opts_from_file($master_opt_file); + + foreach my $opt (@$extra_master_opt) + { + my $value; + + $value= mtr_match_prefix($opt, "--timezone="); + + if ( defined $value ) + { + $ENV{'TZ'}= $value; # FIXME pass this on somehow.... + $extra_master_opt= []; + $tinfo->{'master_restart'}= 0; + last; + } + + $value= mtr_match_prefix($opt, "--result-file="); + + if ( defined $value ) + { + $tinfo->{'result_file'}= "r/$value.result"; + if ( $opt_result_ext and $opt_record or + -f "$tinfo->{'result_file'}$opt_result_ext") + { + $tinfo->{'result_file'}.= $opt_result_ext; + } + $extra_master_opt= []; + $tinfo->{'master_restart'}= 0; + last; + } + } + + $tinfo->{'master_opt'}= $extra_master_opt; + } + + if ( -f $slave_opt_file ) + { + $tinfo->{'slave_opt'}= mtr_get_opts_from_file($slave_opt_file); + $tinfo->{'slave_restart'}= 1; + } + + if ( -f $slave_mi_file ) + { + $tinfo->{'slave_mi'}= mtr_get_opts_from_file($slave_mi_file); + $tinfo->{'slave_restart'}= 1; + } + + if ( -f $master_sh ) + { + if ( $glob_win32_perl ) + { + $tinfo->{'skip'}= 1; + } + else + { + $tinfo->{'master_sh'}= $master_sh; + $tinfo->{'master_restart'}= 1; + } + } + + if ( -f $slave_sh ) + { + if ( $glob_win32_perl ) + { + $tinfo->{'skip'}= 1; + } + else + { + $tinfo->{'slave_sh'}= $slave_sh; + $tinfo->{'slave_restart'}= 1; + } + } + + # We can't restart a running server that may be in use + + if ( $glob_use_running_server and + ( $tinfo->{'master_restart'} or $tinfo->{'slave_restart'} ) ) + { + $tinfo->{'skip'}= 1; + 
} + + } + + closedir TESTDIR; + + return \@tests; +} + + +############################################################################## +# +# Handle left overs from previous runs +# +############################################################################## + +sub kill_and_cleanup () { + + if ( $opt_fast or $glob_use_embedded_server ) + { + # FIXME is embedded server really using PID files?! + unlink($master->[0]->{'path_mypid'}); + unlink($master->[1]->{'path_mypid'}); + unlink($slave->[0]->{'path_mypid'}); + unlink($slave->[1]->{'path_mypid'}); + unlink($slave->[2]->{'path_mypid'}); + } + else + { + # Ensure that no old mysqld test servers are running + # This is different from terminating processes we have + # started from this run of the script, this is terminating + # leftovers from previous runs. + + mtr_report("Killing Possible Leftover Processes"); + mtr_kill_leftovers(); + } + + if ( $opt_with_ndbcluster and ! $glob_use_running_ndbcluster ) + { + ndbcluster_stop(); + } + + mtr_report("Removing Stale Files"); + + rmtree("$glob_mysql_test_dir/var/log"); + rmtree("$glob_mysql_test_dir/var/ndbcluster"); + rmtree("$glob_mysql_test_dir/var/run"); + rmtree("$glob_mysql_test_dir/var/tmp"); + + mkpath("$glob_mysql_test_dir/var/log"); + mkpath("$glob_mysql_test_dir/var/ndbcluster"); + mkpath("$glob_mysql_test_dir/var/run"); + mkpath("$glob_mysql_test_dir/var/tmp"); + mkpath($opt_tmpdir); + + rmtree("$master->[0]->{'path_myddir'}"); + mkpath("$master->[0]->{'path_myddir'}/mysql"); # Need to create subdir?! + mkpath("$master->[0]->{'path_myddir'}/test"); + + rmtree("$master->[1]->{'path_myddir'}"); + mkpath("$master->[1]->{'path_myddir'}/mysql"); # Need to create subdir?! + mkpath("$master->[1]->{'path_myddir'}/test"); + + rmtree("$slave->[0]->{'path_myddir'}"); + mkpath("$slave->[0]->{'path_myddir'}/mysql"); # Need to create subdir?!
+ mkpath("$slave->[0]->{'path_myddir'}/test"); + + rmtree("$slave->[1]->{'path_myddir'}"); + mkpath("$slave->[1]->{'path_myddir'}/mysql"); # Need to create subdir?! + mkpath("$slave->[1]->{'path_myddir'}/test"); + + rmtree("$slave->[2]->{'path_myddir'}"); + mkpath("$slave->[2]->{'path_myddir'}/mysql"); # Need to create subdir?! + mkpath("$slave->[2]->{'path_myddir'}/test"); + + $opt_wait_for_master= $opt_sleep_time_for_first_master; + $opt_wait_for_slave= $opt_sleep_time_for_first_slave; +} + + +# FIXME + +sub sleep_until_file_created ($$) { + my $pidfile= shift; + my $timeout= shift; + + my $loop= $timeout * 2; + while ( $loop-- ) + { + if ( -r $pidfile ) + { + return; + } + mtr_debug("Sleep for 1 second waiting for creation of $pidfile"); + sleep(1); + } + + if ( ! -r $pidfile ) + { + mtr_error("No $pidfile was created"); + } +} + + +############################################################################## +# +# Start the ndb cluster +# +############################################################################## + +# FIXME why is there a different start below?! + +sub ndbcluster_start () { + + mtr_report("Starting ndbcluster"); + my $ndbcluster_opts= $opt_bench ? "" : "--small"; + # FIXME check result code?! + mtr_run("$glob_mysql_test_dir/ndb/ndbcluster", + ["--port-base=$opt_ndbcluster_port", + $ndbcluster_opts, + "--diskless", + "--initial", + "--data-dir=$glob_mysql_test_dir/var"], + "", "", "", ""); +} + +sub ndbcluster_stop () { + mtr_run("$glob_mysql_test_dir/ndb/ndbcluster", + ["--data-dir=$glob_mysql_test_dir/var", + "--port-base=$opt_ndbcluster_port", + "--stop"], + "", "", "", ""); +} + + +############################################################################## +# +# Run the benchmark suite +# +############################################################################## + +sub run_benchmarks ($) { + my $benchmark= shift; + + my $args; + + if ( ! $glob_use_embedded_server and ! 
$opt_local_master ) + { + $master->[0]->{'pid'}= mysqld_start('master',0,[],[]); + } + + mtr_init_args(\$args); + + mtr_add_arg($args, "--socket=%s", $master->[0]->{'path_mysock'}); + mtr_add_arg($args, "--user=root"); + + if ( $opt_small_bench ) + { + mtr_add_arg($args, "--small-test"); + mtr_add_arg($args, "--small-tables"); + } + + if ( $opt_with_ndbcluster ) + { + mtr_add_arg($args, "--create-options=TYPE=ndb"); + } + + my $benchdir= "$glob_basedir/sql-bench"; + chdir($benchdir); # FIXME check error + + # FIXME write shorter.... + + if ( ! $benchmark ) + { + mtr_add_arg($args, "--log"); + mtr_run("$glob_mysql_bench_dir/run-all-tests", $args, "", "", "", ""); + # FIXME check result code?! + } + elsif ( -x $benchmark ) + { + mtr_run("$glob_mysql_bench_dir/$benchmark", $args, "", "", "", ""); + # FIXME check result code?! + } + else + { + mtr_error("Benchmark $benchmark not found"); + } + + chdir($glob_mysql_test_dir); # Go back + + if ( ! $glob_use_embedded_server ) + { + stop_masters(); + } +} + + +############################################################################## +# +# Run the test suite +# +############################################################################## + +# FIXME how to specify several suites to run? Comma separated list? + +sub run_tests () { + run_suite($opt_suite); +} + +sub run_suite () { + my $suite= shift; + + mtr_print_thick_line(); + + mtr_report("Finding Tests in $suite suite"); + + my $tests= collect_test_cases($suite); + + mtr_report("Starting Tests in $suite suite"); + + mtr_print_header(); + + foreach my $tinfo ( @$tests ) + { + run_testcase($tinfo); + } + + mtr_print_line(); + + if ( ! $opt_gdb and ! $glob_use_running_server and + ! $opt_ddd and ! $glob_use_embedded_server ) + { + stop_masters_slaves(); + } + + if ( $opt_with_ndbcluster and ! 
$glob_use_running_ndbcluster ) + { + ndbcluster_stop(); + } + + if ( $opt_gcov ) + { + gcov_collect(); # collect coverage information + } + if ( $opt_gprof ) + { + gprof_collect(); # collect coverage information + } + + mtr_report_stats($tests); +} + + +############################################################################## +# +# Initiate the test databases +# +############################################################################## + +sub mysql_install_db () { + + mtr_report("Installing Test Databases"); + + install_db('master', $master->[0]->{'path_myddir'}); + install_db('slave', $slave->[0]->{'path_myddir'}); + + return 0; +} + + +sub install_db ($$) { + my $type= shift; + my $data_dir= shift; + + my $init_db_sql= "lib/init_db.sql"; # FIXME this is too simple maybe + my $args; + + mtr_report("Installing \u$type Databases"); + + mtr_init_args(\$args); + + mtr_add_arg($args, "--no-defaults"); + mtr_add_arg($args, "--bootstrap"); + mtr_add_arg($args, "--skip-grant-tables"); + mtr_add_arg($args, "--basedir=%s", $path_my_basedir); + mtr_add_arg($args, "--datadir=%s", $data_dir); + mtr_add_arg($args, "--skip-innodb"); + mtr_add_arg($args, "--skip-ndbcluster"); + mtr_add_arg($args, "--skip-bdb"); + + if ( ! $opt_netware ) + { + mtr_add_arg($args, "--language=%s", $path_language); + mtr_add_arg($args, "--character-sets-dir=%s", $path_charsetsdir); + } + + if ( mtr_run($exe_mysqld, $args, $init_db_sql, + $path_manager_log, $path_manager_log, "") != 0 ) + { + mtr_error("Error executing mysqld --bootstrap\n" . + "Could not install $type test DBs"); + } +} + + +############################################################################## +# +# Run a single test case +# +############################################################################## + +# When we get here, we have already filtered out test cases that doesn't +# apply to the current setup, for example if we use a running server, test +# cases that restart the server are dropped. 
So this function should mostly +# be about doing things, not a lot of logic. + +# We don't start and kill the servers for each testcase. But some +# testcases needs a restart, because they specify options to start +# mysqld with. After that testcase, we need to restart again, to set +# back the normal options. + +sub run_testcase ($) { + my $tinfo= shift; + + my $tname= $tinfo->{'name'}; + + mtr_tonewfile($opt_current_test,"$tname\n"); # Always tell where we are + + # ---------------------------------------------------------------------- + # If marked to skip, just print out and return. + # Note that a test case not marked as 'skip' can still be + # skipped later, because of the test case itself in cooperation + # with the mysqltest program tells us so. + # ---------------------------------------------------------------------- + + if ( $tinfo->{'skip'} ) + { + mtr_report_test_name($tinfo); + mtr_report_test_skipped($tinfo); + return; + } + + # ---------------------------------------------------------------------- + # If not using a running servers we may need to stop and restart. + # We restart in the case we have initiation scripts, server options + # etc to run. But we also restart again after the test first restart + # and test is run, to get back to normal server settings. + # + # To make the code a bit more clean, we actually only stop servers + # here, and mark this to be done. Then a generic "start" part will + # start up the needed servers again. + # ---------------------------------------------------------------------- + + if ( ! $glob_use_running_server and ! $glob_use_embedded_server ) + { + if ( $tinfo->{'master_restart'} or $master->[0]->{'uses_special_flags'} ) + { + stop_masters(); + $master->[0]->{'uses_special_flags'}= 0; # Forget about why we stopped + } + + # ---------------------------------------------------------------------- + # Always terminate all slaves, if any. 
Else we may have useless + # reconnection attempts and error messages in case the slave and + # master servers restart. + # ---------------------------------------------------------------------- + + stop_slaves(); + } + + # ---------------------------------------------------------------------- + # Prepare to start masters. Even if we use embedded, we want to run + # the preparation. + # ---------------------------------------------------------------------- + + mtr_report_test_name($tinfo); + + mtr_tofile($master->[0]->{'path_myerr'},"CURRENT_TEST: $tname\n"); + do_before_start_master($tname,$tinfo->{'master_sh'}); + + # ---------------------------------------------------------------------- + # Start masters + # ---------------------------------------------------------------------- + + if ( ! $glob_use_running_server and ! $glob_use_embedded_server ) + { + # FIXME give the args to the embedded server?! + # FIXME what does $opt_local_master mean?! + # FIXME split up start and check that started so that can do + # starts in parallel, masters and slaves at the same time. + + if ( ! $opt_local_master ) + { + if ( ! $master->[0]->{'pid'} ) + { + $master->[0]->{'pid'}= + mysqld_start('master',0,$tinfo->{'master_opt'},[]); + } + if ( $opt_with_ndbcluster and ! $master->[1]->{'pid'} ) + { + $master->[1]->{'pid'}= + mysqld_start('master',1,$tinfo->{'master_opt'},[]); + } + + if ( $tinfo->{'master_opt'} ) + { + $master->[0]->{'uses_special_flags'}= 1; + } + } + + # ---------------------------------------------------------------------- + # Start slaves - if needed + # ---------------------------------------------------------------------- + + if ( $tinfo->{'slave_num'} ) + { + mtr_tofile($slave->[0]->{'path_myerr'},"CURRENT_TEST: $tname\n"); + + do_before_start_slave($tname,$tinfo->{'slave_sh'}); + + for ( my $idx= 0; $idx < $tinfo->{'slave_num'}; $idx++ ) + { + if ( ! 
$slave->[$idx]->{'pid'} ) + { + $slave->[$idx]->{'pid'}= + mysqld_start('slave',$idx, + $tinfo->{'slave_opt'}, $tinfo->{'slave_mi'}); + } + } + } + } + + # ---------------------------------------------------------------------- + # Run the test case + # ---------------------------------------------------------------------- + + { + unlink("r/$tname.reject"); + unlink($path_timefile); + + my $res= run_mysqltest($tinfo, $tinfo->{'master_opt'}); + + if ( $res == 0 ) + { + mtr_report_test_passed($tinfo); + } + elsif ( $res == 2 ) + { + # Testcase itself tell us to skip this one + mtr_report_test_skipped($tinfo); + } + else + { + # Test case failed + if ( $res > 2 ) + { + mtr_tofile($path_timefile, + "mysqltest returned unexpected code $res, " . + "it has probably crashed"); + } + mtr_report_test_failed($tinfo); + mtr_show_failed_diff($tname); + print "\n"; + if ( ! $opt_force ) + { + print "Aborting: $tname failed. To continue, re-run with '--force'."; + print "\n"; + if ( ! $opt_gdb and ! $glob_use_running_server and + ! $opt_ddd and ! $glob_use_embedded_server ) + { + stop_masters_slaves(); + } + exit(1); + } + + # FIXME always terminate on failure?! + if ( ! $opt_gdb and ! $glob_use_running_server and + ! $opt_ddd and ! $glob_use_embedded_server ) + { + stop_masters_slaves(); + } + print "Resuming Tests\n\n"; + } + } +} + + +############################################################################## +# +# Start and stop servers +# +############################################################################## + +# The embedded server needs the cleanup so we do some of the start work +# but stop before actually running mysqld or anything. + +sub do_before_start_master ($$) { + my $tname= shift; + my $master_init_script= shift; + + # FIXME what about second master..... + + # Remove stale binary logs except for 2 tests which need them FIXME here???? 
+ if ( $tname ne "rpl_crash_binlog_ib_1b" and + $tname ne "rpl_crash_binlog_ib_2b" and + $tname ne "rpl_crash_binlog_ib_3b") + { + # FIXME we really want separate dir for binlogs + `rm -fr $glob_mysql_test_dir/var/log/master-bin.*`; +# unlink("$glob_mysql_test_dir/var/log/master-bin.*"); + } + + # Remove old master.info and relay-log.info files + unlink("$glob_mysql_test_dir/var/master-data/master.info"); + unlink("$glob_mysql_test_dir/var/master-data/relay-log.info"); + unlink("$glob_mysql_test_dir/var/master1-data/master.info"); + unlink("$glob_mysql_test_dir/var/master1-data/relay-log.info"); + + #run master initialization shell script if one exists + + if ( $master_init_script and + mtr_run($master_init_script, [], "", "", "", "") != 0 ) + { + mtr_error("Can't run $master_init_script"); + } + # for gcov FIXME needed? If so we need more absolute paths +# chdir($glob_basedir); +} + +sub do_before_start_slave ($$) { + my $tname= shift; + my $slave_init_script= shift; + + # When testing fail-safe replication, we will have more than one slave + # in this case, we start secondary slaves with an argument + + # Remove stale binary logs and old master.info files + # except for two tests which need them + if ( $tname ne "rpl_crash_binlog_ib_1b" and + $tname ne "rpl_crash_binlog_ib_2b" and + $tname ne "rpl_crash_binlog_ib_3b" ) + { + # FIXME we really want separate dir for binlogs + `rm -fr $glob_mysql_test_dir/var/log/slave*-bin.*`; +# unlink("$glob_mysql_test_dir/var/log/slave*-bin.*"); # FIXME idx??? + # FIXME really master?!
+ unlink("$glob_mysql_test_dir/var/slave-data/master.info"); + unlink("$glob_mysql_test_dir/var/slave-data/relay-log.info"); + } + + #run slave initialization shell script if one exists + if ( $slave_init_script and + mtr_run($slave_init_script, [], "", "", "", "") != 0 ) + { + mtr_error("Can't run $slave_init_script"); + } + + unlink("$glob_mysql_test_dir/var/slave-data/log.*"); +} + +sub mysqld_arguments ($$$$$) { + my $args= shift; + my $type= shift; # master/slave/bootstrap + my $idx= shift; + my $extra_opt= shift; + my $slave_master_info= shift; + + my $sidx= ""; # Index as string, 0 is empty string + if ( $idx > 0 ) + { + $sidx= sprintf("%d", $idx); # sprintf not needed in Perl for this + } + + my $prefix= ""; # If mysqltest server arg + + if ( $glob_use_embedded_server ) + { + $prefix= "--server-arg="; + } else { + # We can't pass embedded server --no-defaults + mtr_add_arg($args, "%s--no-defaults", $prefix); + } + + mtr_add_arg($args, "%s--basedir=%s", $prefix, $path_my_basedir); + mtr_add_arg($args, "%s--character-sets-dir=%s", $prefix, $path_charsetsdir); + mtr_add_arg($args, "%s--core", $prefix); + mtr_add_arg($args, "%s--default-character-set=latin1", $prefix); + mtr_add_arg($args, "%s--language=%s", $prefix, $path_language); + mtr_add_arg($args, "%s--tmpdir=$opt_tmpdir", $prefix); + + if ( $opt_valgrind ) + { + mtr_add_arg($args, "%s--skip-safemalloc", $prefix); + mtr_add_arg($args, "%s--skip-bdb", $prefix); + } + + my $pidfile; + + if ( $type eq 'master' ) + { + mtr_add_arg($args, "%s--log-bin=%s/var/log/master-bin", $prefix, + $glob_mysql_test_dir); + mtr_add_arg($args, "%s--pid-file=%s", $prefix, + $master->[$idx]->{'path_mypid'}); + mtr_add_arg($args, "%s--port=%d", $prefix, + $master->[$idx]->{'path_myport'}); + mtr_add_arg($args, "%s--server-id=1", $prefix); + mtr_add_arg($args, "%s--socket=%s", $prefix, + $master->[$idx]->{'path_mysock'}); + mtr_add_arg($args, "%s--innodb_data_file_path=ibdata1:50M", $prefix); + mtr_add_arg($args, 
"%s--local-infile", $prefix); + mtr_add_arg($args, "%s--datadir=%s", $prefix, + $master->[$idx]->{'path_myddir'}); + } + + if ( $type eq 'slave' ) + { + my $slave_server_id= 2 + $idx; + my $slave_rpl_rank= $idx > 0 ? 2 : $slave_server_id; + + mtr_add_arg($args, "%s--datadir=%s", $prefix, + $slave->[$idx]->{'path_myddir'}); + mtr_add_arg($args, "%s--exit-info=256", $prefix); + mtr_add_arg($args, "%s--init-rpl-role=slave", $prefix); + mtr_add_arg($args, "%s--log-bin=%s/var/log/slave%s-bin", $prefix, + $glob_mysql_test_dir, $sidx); # FIXME use own dir for binlogs + mtr_add_arg($args, "%s--log-slave-updates", $prefix); + mtr_add_arg($args, "%s--log=%s", $prefix, + $slave->[$idx]->{'path_myerr'}); + mtr_add_arg($args, "%s--master-retry-count=10", $prefix); + mtr_add_arg($args, "%s--pid-file=%s", $prefix, + $slave->[$idx]->{'path_mypid'}); + mtr_add_arg($args, "%s--port=%d", $prefix, + $slave->[$idx]->{'path_myport'}); + mtr_add_arg($args, "%s--relay-log=%s/var/log/slave%s-relay-bin", $prefix, + $glob_mysql_test_dir, $sidx); + mtr_add_arg($args, "%s--report-host=127.0.0.1", $prefix); + mtr_add_arg($args, "%s--report-port=%d", $prefix, + $slave->[$idx]->{'path_myport'}); + mtr_add_arg($args, "%s--report-user=root", $prefix); + mtr_add_arg($args, "%s--skip-innodb", $prefix); + mtr_add_arg($args, "%s--skip-ndbcluster", $prefix); + mtr_add_arg($args, "%s--skip-slave-start", $prefix); + mtr_add_arg($args, "%s--slave-load-tmpdir=%s", $prefix, + $path_slave_load_tmpdir); + mtr_add_arg($args, "%s--socket=%s", $prefix, + $slave->[$idx]->{'path_mysock'}); + mtr_add_arg($args, "%s--set-variable=slave_net_timeout=10", $prefix); + + if ( @$slave_master_info ) + { + foreach my $arg ( @$slave_master_info ) + { + mtr_add_arg($args, "%s%s", $prefix, $arg); + } + } + else + { + mtr_add_arg($args, "%s--master-user=root", $prefix); + mtr_add_arg($args, "%s--master-connect-retry=1", $prefix); + mtr_add_arg($args, "%s--master-host=127.0.0.1", $prefix); + mtr_add_arg($args, 
"%s--master-password=", $prefix); + mtr_add_arg($args, "%s--master-port=%d", $prefix, + $master->[0]->{'path_myport'}); # First master + mtr_add_arg($args, "%s--server-id=%d", $prefix, $slave_server_id); + mtr_add_arg($args, "%s--rpl-recovery-rank=%d", $prefix, $slave_rpl_rank); + } + } # end slave + + if ( $opt_debug ) + { + if ( $type eq 'master' ) + { + mtr_add_arg($args, "--debug=d:t:i:A,%s/var/log/master%s.trace", + $prefix, $glob_mysql_test_dir, $sidx); + } + if ( $type eq 'slave' ) + { + mtr_add_arg($args, "--debug=d:t:i:A,%s/var/log/slave%s.trace", + $prefix, $glob_mysql_test_dir, $sidx); + } + } + + if ( $opt_with_ndbcluster ) + { + mtr_add_arg($args, "%s--ndbcluster", $prefix); + + if ( $glob_use_running_ndbcluster ) + { + mtr_add_arg($args,"--ndb-connectstring=%s", $prefix, + $opt_ndbconnectstring); + } + else + { + mtr_add_arg($args,"--ndb-connectstring=host=localhost:%d", + $prefix, $opt_ndbcluster_port); + } + } + + # FIXME always set nowdays??? SMALL_SERVER + mtr_add_arg($args, "%s--key_buffer_size=1M", $prefix); + mtr_add_arg($args, "%s--sort_buffer=256K", $prefix); + mtr_add_arg($args, "%s--max_heap_table_size=1M", $prefix); + + if ( $opt_with_openssl ) + { + mtr_add_arg($args, "%s--ssl-ca=%s/SSL/cacert.pem", $prefix, $glob_basedir); + mtr_add_arg($args, "%s--ssl-cert=%s/SSL/server-cert.pem", $prefix, + $glob_basedir); + mtr_add_arg($args, "%s--ssl-key=%s/SSL/server-key.pem", $prefix, + $glob_basedir); + } + + if ( $opt_warnings ) + { + mtr_add_arg($args, "%s--log-warnings", $prefix); + } + + if ( $opt_gdb or $opt_client_gdb or $opt_manual_gdb or $opt_ddd) + { + mtr_add_arg($args, "%s--gdb", $prefix); + } + + # If we should run all tests cases, we will use a local server for that + + if ( -w "/" ) + { + # We are running as root; We need to add the --root argument + mtr_add_arg($args, "%s--user=root", $prefix); + } + + if ( $type eq 'master' ) + { + + if ( ! 
$opt_old_master ) + { + mtr_add_arg($args, "%s--rpl-recovery-rank=1", $prefix); + mtr_add_arg($args, "%s--init-rpl-role=master", $prefix); + } + + # FIXME strange,..... + if ( $opt_local_master ) + { + mtr_add_arg($args, "%s--host=127.0.0.1", $prefix); + mtr_add_arg($args, "%s--port=%s", $prefix, $ENV{'MYSQL_MYPORT'}); + } + } + + foreach my $arg ( @$extra_opt ) + { + mtr_add_arg($args, "%s%s", $prefix, $arg); + } + + if ( $opt_bench ) + { + mtr_add_arg($args, "%s--rpl-recovery-rank=1", $prefix); + mtr_add_arg($args, "%s--init-rpl-role=master", $prefix); + } + else + { + mtr_add_arg($args, "%s--exit-info=256", $prefix); + mtr_add_arg($args, "%s--open-files-limit=1024", $prefix); + + if ( $type eq 'master' ) + { + mtr_add_arg($args, "%s--log=%s", $prefix, $master->[0]->{'path_mylog'}); + } + if ( $type eq 'slave' ) + { + mtr_add_arg($args, "%s--log=%s", $prefix, $slave->[0]->{'path_mylog'}); + } + } + + return $args; +} + +# FIXME +# if ( $type eq 'master' and $glob_use_embedded_server ) +# { +# # Add a -A to each argument to pass it to embedded server +# my @mysqltest_opt= map {("-A",$_)} @args; +# $opt_extra_mysqltest_opt= \@mysqltest_opt; +# return; +# } + +############################################################################## +# +# Start mysqld and return the PID +# +############################################################################## + +sub mysqld_start ($$$$) { + my $type= shift; # master/slave/bootstrap + my $idx= shift; + my $extra_opt= shift; + my $slave_master_info= shift; + + my $args; # Arg vector + my $exe; + my $pid; + + # FIXME code duplication, make up your mind.... + if ( $opt_source_dist ) + { + $exe= "$glob_basedir/sql/mysqld"; + } + else + { + $exe ="$glob_basedir/libexec/mysqld"; + if ( ! 
-x $exe ) + { + $exe ="$glob_basedir/bin/mysqld"; + } + } + + mtr_init_args(\$args); + + if ( $opt_valgrind ) + { + + mtr_add_arg($args, "--tool=memcheck"); + mtr_add_arg($args, "--alignment=8"); + mtr_add_arg($args, "--leak-check=yes"); + mtr_add_arg($args, "--num-callers=16"); + + if ( $opt_valgrind_all ) + { + mtr_add_arg($args, "-v"); + mtr_add_arg($args, "--show-reachable=yes"); + } + + if ( $opt_valgrind_options ) + { + # FIXME split earlier and put into @glob_valgrind_* + mtr_add_arg($args, split(' ', $opt_valgrind_options)); + } + + mtr_add_arg($args, $exe); + + $exe= $opt_valgrind; + } + + mysqld_arguments($args,$type,$idx,$extra_opt,$slave_master_info); + + if ( $type eq 'master' ) + { + if ( $pid= mtr_spawn($exe, $args, "", + $master->[$idx]->{'path_myerr'}, + $master->[$idx]->{'path_myerr'}, "") ) + { + sleep_until_file_created($master->[$idx]->{'path_mypid'}, + $opt_wait_for_master); + $opt_wait_for_master= $opt_sleep_time_for_second_master; + return $pid; + } + } + + if ( $type eq 'slave' ) + { + if ( $pid= mtr_spawn($exe, $args, "", + $slave->[$idx]->{'path_myerr'}, + $slave->[$idx]->{'path_myerr'}, "") ) + { + sleep_until_file_created($slave->[$idx]->{'path_mypid'}, + $opt_wait_for_slave); + $opt_wait_for_slave= $opt_sleep_time_for_second_slave; + return $pid; + } + } + + mtr_error("Can't start mysqld FIXME"); +} + +sub stop_masters_slaves () { + + print "Ending Tests\n"; + print "Shutting-down MySQL daemon\n\n"; + stop_masters(); + print "Master(s) shutdown finished\n"; + stop_slaves(); + print "Slave(s) shutdown finished\n"; +} + +sub stop_masters () { + + my @args; + + for ( my $idx; $idx < 2; $idx++ ) + { + # FIXME if we hit ^C before fully started, this test will prevent + # the mysqld process from being killed + if ( $master->[$idx]->{'pid'} ) + { + push(@args,{ + pid => $master->[$idx]->{'pid'}, + pidfile => $master->[$idx]->{'path_mypid'}, + sockfile => $master->[$idx]->{'path_mysock'}, + port => $master->[$idx]->{'path_myport'}, + }); + 
$master->[$idx]->{'pid'}= 0; # Assume we are done with it + } + } + + mtr_stop_mysqld_servers(\@args, 0); +} + +sub stop_slaves () { + my $force= shift; + + my @args; + + for ( my $idx; $idx < 3; $idx++ ) + { + if ( $slave->[$idx]->{'pid'} ) + { + push(@args,{ + pid => $slave->[$idx]->{'pid'}, + pidfile => $slave->[$idx]->{'path_mypid'}, + sockfile => $slave->[$idx]->{'path_mysock'}, + port => $slave->[$idx]->{'path_myport'}, + }); + $slave->[$idx]->{'pid'}= 0; # Assume we are done with it + } + } + + mtr_stop_mysqld_servers(\@args, 0); +} + + +sub run_mysqltest ($$) { + my $tinfo= shift; + my $master_opts= shift; + + # FIXME set where???? + my $cmdline_mysqldump= "$exe_mysqldump --no-defaults -uroot " . + "--socket=$master->[0]->{'path_mysock'} --password="; + if ( $opt_debug ) + { + $cmdline_mysqldump .= + " --debug=d:t:A,$glob_mysql_test_dir/var/log/mysqldump.trace"; + } + + my $cmdline_mysqlbinlog= + "$exe_mysqlbinlog --no-defaults --local-load=$opt_tmpdir"; + + if ( $opt_debug ) + { + $cmdline_mysqlbinlog .= + " --debug=d:t:A,$glob_mysql_test_dir/var/log/mysqlbinlog.trace"; + } + + my $cmdline_mysql= + "$exe_mysql --host=localhost --port=$master->[0]->{'path_myport'} " . 
+ "--socket=$master->[0]->{'path_mysock'} --user=root --password="; + + $ENV{'MYSQL'}= $exe_mysql; + $ENV{'MYSQL_DUMP'}= $cmdline_mysqldump; + $ENV{'MYSQL_BINLOG'}= $exe_mysqlbinlog; + $ENV{'CLIENT_BINDIR'}= $path_client_bindir; + $ENV{'TESTS_BINDIR'}= $path_tests_bindir; + + my $exe= $exe_mysqltest; + my $args; + + mtr_init_args(\$args); + + mtr_add_arg($args, "--no-defaults"); + mtr_add_arg($args, "--socket=%s", $master->[0]->{'path_mysock'}); + mtr_add_arg($args, "--database=test"); + mtr_add_arg($args, "--user=%s", $glob_user); + mtr_add_arg($args, "--password="); + mtr_add_arg($args, "--silent"); + mtr_add_arg($args, "-v"); + mtr_add_arg($args, "--skip-safemalloc"); + mtr_add_arg($args, "--tmpdir=%s", $opt_tmpdir); + mtr_add_arg($args, "--port=%d", $master->[0]->{'path_myport'}); + + if ( $opt_ps_protocol ) + { + mtr_add_arg($args, "--ps-protocol"); + } + + if ( $opt_strace_client ) + { + $exe= "strace"; # FIXME there are ktrace, .... + mtr_add_arg($args, "-o"); + mtr_add_arg($args, "%s/var/log/mysqltest.strace", $glob_mysql_test_dir); + mtr_add_arg($args, "$exe_mysqltest"); + } + + if ( $opt_timer ) + { + mtr_add_arg($args, "--timer-file=var/log/timer"); + } + + if ( $opt_big_test ) + { + mtr_add_arg($args, "--big-test"); + } + + if ( $opt_record ) + { + mtr_add_arg($args, "--record"); + } + + if ( $opt_compress ) + { + mtr_add_arg($args, "--compress"); + } + + if ( $opt_sleep ) + { + mtr_add_arg($args, "--sleep=%d", $opt_sleep); + } + + if ( $opt_debug ) + { + mtr_add_arg($args, "--debug=d:t:A,%s/var/log/mysqltest.trace", + $glob_mysql_test_dir); + } + + if ( $opt_with_openssl ) + { + mtr_add_arg($args, "--ssl-ca=%s/SSL/cacert.pem", $glob_basedir); + mtr_add_arg($args, "--ssl-cert=%s/SSL/client-cert.pem", $glob_basedir); + mtr_add_arg($args, "--ssl-key=%s/SSL/client-key.pem", $glob_basedir); + } + + mtr_add_arg($args, "-R"); + mtr_add_arg($args, $tinfo->{'result_file'}); + + # ---------------------------------------------------------------------- + # If 
embedded server, we create server args to give mysqltest to pass on + # ---------------------------------------------------------------------- + + if ( $glob_use_embedded_server ) + { + mysqld_arguments($args,'master',0,$tinfo->{'master_opt'},[]); + } + + return mtr_run($exe_mysqltest,$args,$tinfo->{'path'},"",$path_timefile,""); +} + +############################################################################## +# +# Usage +# +############################################################################## + +sub usage ($) +{ + print STDERR < #endif #ifdef __WIN__ -#include +#include +#include #include #endif @@ -89,15 +90,25 @@ static char master_socket[FN_REFLEN]= "./var/tmp/master.sock"; static char slave_socket[FN_REFLEN]= "./var/tmp/slave.sock"; #endif +#define MAX_COUNT_TESTES 1024 + +#ifdef __WIN__ +# define sting_compare_func _stricmp +#else +# ifdef HAVE_STRCASECMP +# define sting_compare_func strcasecmp +# else +# define sting_compare_func strcmp +# endif +#endif + /* comma delimited list of tests to skip or empty string */ #ifndef __WIN__ static char skip_test[FN_REFLEN]= " lowercase_table3 , system_mysql_db_fix "; #else /* The most ignore testes contain the calls of system command -*/ -#define MAX_COUNT_TESTES 1024 -/* + lowercase_table3 is disabled by Gerg system_mysql_db_fix is disabled by Gerg sp contains a command system @@ -1437,12 +1448,11 @@ void setup(char *file __attribute__((unused))) /* Compare names of testes for right order */ -#ifdef __WIN__ int compare( const void *arg1, const void *arg2 ) { - return _stricmp( * ( char** ) arg1, * ( char** ) arg2 ); + return sting_compare_func( * ( char** ) arg1, * ( char** ) arg2 ); } -#endif + /****************************************************************************** @@ -1454,6 +1464,10 @@ int compare( const void *arg1, const void *arg2 ) int main(int argc, char **argv) { int is_ignore_list= 0; + char **names= 0; + char **testes= 0; + int name_index; + int index; /* setup */ setup(argv[0]); @@ 
-1517,6 +1531,11 @@ int main(int argc, char **argv) else { /* run all tests */ + testes= malloc(MAX_COUNT_TESTES*sizeof(void*)); + if (!testes) + die("can not allcate memory for sorting"); + names= testes; + name_index= 0; #ifndef __WIN__ struct dirent *entry; DIR *parent; @@ -1534,74 +1553,79 @@ int main(int argc, char **argv) /* find the test suffix */ if ((position= strinstr(test, TEST_SUFFIX)) != 0) { - /* null terminate at the suffix */ - *(test + position - 1)= '\0'; - /* run test */ - run_test(test); + if (name_index < MAX_COUNT_TESTES) + { + /* null terminate at the suffix */ + *(test + position - 1)= '\0'; + /* insert test */ + *names= malloc(FN_REFLEN); + strcpy(*names,test); + names++; + name_index++; + } + else + die("can not sort files, array is overloaded"); } } closedir(parent); } #else - struct _finddata_t dir; - intptr_t handle; - char test[FN_LEN]; - char mask[FN_REFLEN]; - char *p; - int position; - char **names= 0; - char **testes= 0; - int name_index; - int index; - - /* single test */ - single_test= FALSE; - - snprintf(mask,FN_REFLEN,"%s/*.test",test_dir); - - if ((handle=_findfirst(mask,&dir)) == -1L) { - die("Unable to open tests directory."); - } + struct _finddata_t dir; + int* handle; + char test[FN_LEN]; + char mask[FN_REFLEN]; + char *p; + int position; - names= malloc(MAX_COUNT_TESTES*4); - testes= names; - name_index= 0; + /* single test */ + single_test= FALSE; - do - { - if (!(dir.attrib & _A_SUBDIR)) + snprintf(mask,FN_REFLEN,"%s/*.test",test_dir); + + if ((handle=_findfirst(mask,&dir)) == -1L) { - strcpy(test, strlwr(dir.name)); - - /* find the test suffix */ - if ((position= strinstr(test, TEST_SUFFIX)) != 0) - { - p= test + position - 1; - /* null terminate at the suffix */ - *p= 0; - - /* insert test */ - *names= malloc(FN_REFLEN); - strcpy(*names,test); - names++; - name_index++; - } + die("Unable to open tests directory."); } - }while (_findnext(handle,&dir) == 0); - _findclose(handle); + do + { + if (!(dir.attrib & 
_A_SUBDIR)) + { + strcpy(test, strlwr(dir.name)); + + /* find the test suffix */ + if ((position= strinstr(test, TEST_SUFFIX)) != 0) + { + if (name_index < MAX_COUNT_TESTES) + { + /* null terminate at the suffix */ + *(test + position - 1)= '\0'; + /* insert test */ + *names= malloc(FN_REFLEN); + strcpy(*names,test); + names++; + name_index++; + } + else + die("can not sort files, array is overloaded"); + } + } + }while (_findnext(handle,&dir) == 0); + + _findclose(handle); + } +#endif qsort( (void *)testes, name_index, sizeof( char * ), compare ); - for (index= 0; index <= name_index; index++) + for (index= 0; index < name_index; index++) { run_test(testes[index]); free(testes[index]); } free(testes); -#endif } /* stop server */ diff --git a/mysql-test/r/cast.result b/mysql-test/r/cast.result index a893e6ee4ed..10eb597fa68 100644 --- a/mysql-test/r/cast.result +++ b/mysql-test/r/cast.result @@ -178,3 +178,12 @@ aaa aa aab aa aac aa DROP TABLE t1; +select date_add(cast('2004-12-30 12:00:00' as date), interval 0 hour); +date_add(cast('2004-12-30 12:00:00' as date), interval 0 hour) +2004-12-30 00:00:00 +select timediff(cast('2004-12-30 12:00:00' as time), '12:00:00'); +timediff(cast('2004-12-30 12:00:00' as time), '12:00:00') +00:00:00 +select timediff(cast('1 12:00:00' as time), '12:00:00'); +timediff(cast('1 12:00:00' as time), '12:00:00') +24:00:00 diff --git a/mysql-test/r/delayed.result b/mysql-test/r/delayed.result index ceb511a7891..e9766622cf6 100644 --- a/mysql-test/r/delayed.result +++ b/mysql-test/r/delayed.result @@ -22,6 +22,9 @@ insert delayed into t1 values (null,"c"); insert delayed into t1 values (3,"d"),(null,"e"); insert delayed into t1 values (3,"this will give an","error"); ERROR 21S01: Column count doesn't match value count at row 1 +show status like 'not_flushed_delayed_rows'; +Variable_name Value +Not_flushed_delayed_rows 0 select * from t1; a b 1 b diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index 
32c80bb330e..b30fddb8de0 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -474,12 +474,15 @@ unix_timestamp(@a) select unix_timestamp('1969-12-01 19:00:01'); unix_timestamp('1969-12-01 19:00:01') 0 -select from_unixtime(0); -from_unixtime(0) +select from_unixtime(-1); +from_unixtime(-1) NULL select from_unixtime(2145916800); from_unixtime(2145916800) NULL +select from_unixtime(0); +from_unixtime(0) +1970-01-01 03:00:00 CREATE TABLE t1 (datetime datetime, timestamp timestamp, date date, time time); INSERT INTO t1 values ("2001-01-02 03:04:05", "2002-01-02 03:04:05", "2003-01-02", "06:07:08"); SELECT * from t1; diff --git a/mysql-test/r/group_min_max.result b/mysql-test/r/group_min_max.result index 006e7052376..c7be93b0fd7 100644 --- a/mysql-test/r/group_min_max.result +++ b/mysql-test/r/group_min_max.result @@ -148,7 +148,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 range NULL idx_t1_1 147 NULL 17 Using index for group-by explain select a1,a2,b,max(c),min(c) from t2 group by a1,a2,b; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range NULL idx_t2_1 163 NULL 21 Using index for group-by +1 SIMPLE t2 range NULL idx_t2_1 # NULL 21 Using index for group-by explain select min(a2), a1, max(a2), min(a2), a1 from t1 group by a1; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 range NULL idx_t1_1 65 NULL 5 Using index for group-by diff --git a/mysql-test/r/multi_update.result b/mysql-test/r/multi_update.result index d10a4bf65f5..f5c4e19af64 100644 --- a/mysql-test/r/multi_update.result +++ b/mysql-test/r/multi_update.result @@ -476,9 +476,7 @@ aclid bigint, index idx_acl(aclid) insert into t2 values(1,null); delete t2, t1 from t2 left join t1 on (t2.aclid=t1.aclid) where t2.refid='1'; drop table t1, t2; -set @ttype_save=@@storage_engine; -set @@storage_engine=innodb; -create table t1 ( c char(8) not null ); +create table t1 ( c char(8) not 
null ) engine=innodb; insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'); insert into t1 values ('A'),('B'),('C'),('D'),('E'),('F'); alter table t1 add b char(8) not null; @@ -489,8 +487,7 @@ create table t2 like t1; insert into t2 select * from t1; delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b; drop table t1,t2; -set @@storage_engine=bdb; -create table t1 ( c char(8) not null ); +create table t1 ( c char(8) not null ) engine=bdb; insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'); insert into t1 values ('A'),('B'),('C'),('D'),('E'),('F'); alter table t1 add b char(8) not null; @@ -500,7 +497,6 @@ update t1 set a=c, b=c; create table t2 like t1; insert into t2 select * from t1; delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b; -set @@storage_engine=@ttype_save; drop table t1,t2; create table t1 (a int, b int); insert into t1 values (1, 2), (2, 3), (3, 4); diff --git a/mysql-test/r/ndb_alter_table.result b/mysql-test/r/ndb_alter_table.result index 1661fa35d13..f3b9e962873 100644 --- a/mysql-test/r/ndb_alter_table.result +++ b/mysql-test/r/ndb_alter_table.result @@ -34,13 +34,13 @@ col5 enum('PENDING', 'ACTIVE', 'DISABLED') not null, col6 int not null, to_be_deleted int) ENGINE=ndbcluster; show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 ndbcluster 9 Dynamic 0 0 0 0 0 0 1 NULL NULL NULL latin1_swedish_ci NULL +t1 ndbcluster 10 Dynamic 0 0 0 0 0 0 1 NULL NULL NULL latin1_swedish_ci NULL SET SQL_MODE=NO_AUTO_VALUE_ON_ZERO; insert into t1 values (0,4,3,5,"PENDING",1,7),(NULL,4,3,5,"PENDING",1,7),(31,4,3,5,"PENDING",1,7), (7,4,3,5,"PENDING",1,7), (NULL,4,3,5,"PENDING",1,7), (100,4,3,5,"PENDING",1,7), (99,4,3,5,"PENDING",1,7), (8,4,3,5,"PENDING",1,7), (NULL,4,3,5,"PENDING",1,7); show table status; Name Engine Version Row_format Rows 
Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 ndbcluster 9 Dynamic 9 0 0 0 0 0 101 NULL NULL NULL latin1_swedish_ci NULL +t1 ndbcluster 10 Dynamic 9 0 0 0 0 0 101 NULL NULL NULL latin1_swedish_ci NULL select * from t1 order by col1; col1 col2 col3 col4 col5 col6 to_be_deleted 0 4 3 5 PENDING 1 7 @@ -60,7 +60,7 @@ change column col2 fourth varchar(30) not null after col3, modify column col6 int not null first; show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 ndbcluster 9 Dynamic 9 0 0 0 0 0 102 NULL NULL NULL latin1_swedish_ci NULL +t1 ndbcluster 10 Dynamic 9 0 0 0 0 0 102 NULL NULL NULL latin1_swedish_ci NULL select * from t1 order by col1; col6 col1 col3 fourth col4 col4_5 col5 col7 col8 1 0 3 4 5 PENDING 0000-00-00 00:00:00 @@ -75,7 +75,7 @@ col6 col1 col3 fourth col4 col4_5 col5 col7 col8 insert into t1 values (2, NULL,4,3,5,99,"PENDING","EXTRA",'2004-01-01 00:00:00'); show table status; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment -t1 ndbcluster 9 Dynamic 10 0 0 0 0 0 103 NULL NULL NULL latin1_swedish_ci NULL +t1 ndbcluster 10 Dynamic 10 0 0 0 0 0 103 NULL NULL NULL latin1_swedish_ci NULL select * from t1 order by col1; col6 col1 col3 fourth col4 col4_5 col5 col7 col8 1 0 3 4 5 PENDING 0000-00-00 00:00:00 diff --git a/mysql-test/r/ndb_bitfield.result b/mysql-test/r/ndb_bitfield.result index 1532697c428..66ec593e195 100644 --- a/mysql-test/r/ndb_bitfield.result +++ b/mysql-test/r/ndb_bitfield.result @@ -143,7 +143,7 @@ create table t1 ( pk1 bit(9) not null primary key, b int ) engine=ndbcluster; -ERROR HY000: Can't create table 
'./test/t1.frm' (errno: 743) +ERROR HY000: Can't create table './test/t1.frm' (errno: 739) create table t1 ( pk1 int not null primary key, b bit(9), diff --git a/mysql-test/r/ndb_charset.result b/mysql-test/r/ndb_charset.result index 00bc36a7c0d..752a4fba630 100644 --- a/mysql-test/r/ndb_charset.result +++ b/mysql-test/r/ndb_charset.result @@ -47,6 +47,40 @@ a aAa drop table t1; create table t1 ( +a varchar(20) character set latin1 collate latin1_swedish_ci primary key +) engine=ndb; +insert into t1 values ('A'),('b '),('C '),('d '),('E'),('f'); +insert into t1 values('b'); +ERROR 23000: Duplicate entry 'b' for key 1 +insert into t1 values('a '); +ERROR 23000: Duplicate entry 'a ' for key 1 +select a,length(a) from t1 order by a; +a length(a) +A 1 +b 2 +C 3 +d 7 +E 1 +f 1 +select a,length(a) from t1 order by a desc; +a length(a) +f 1 +E 1 +d 7 +C 3 +b 2 +A 1 +select * from t1 where a = 'a'; +a +A +select * from t1 where a = 'a '; +a +A +select * from t1 where a = 'd'; +a +d +drop table t1; +create table t1 ( p int primary key, a char(3) character set latin1 collate latin1_bin not null, unique key(a) @@ -99,6 +133,42 @@ p a drop table t1; create table t1 ( p int primary key, +a varchar(20) character set latin1 collate latin1_swedish_ci not null, +unique key(a) +) engine=ndb; +insert into t1 values (1,'A'),(2,'b '),(3,'C '),(4,'d '),(5,'E'),(6,'f'); +insert into t1 values(99,'b'); +ERROR 23000: Duplicate entry '99' for key 1 +insert into t1 values(99,'a '); +ERROR 23000: Duplicate entry '99' for key 1 +select a,length(a) from t1 order by a; +a length(a) +A 1 +b 2 +C 3 +d 7 +E 1 +f 1 +select a,length(a) from t1 order by a desc; +a length(a) +f 1 +E 1 +d 7 +C 3 +b 2 +A 1 +select * from t1 where a = 'a'; +p a +1 A +select * from t1 where a = 'a '; +p a +1 A +select * from t1 where a = 'd'; +p a +4 d +drop table t1; +create table t1 ( +p int primary key, a char(3) character set latin1 collate latin1_bin not null, index(a) ) engine=ndb; @@ -190,7 +260,77 @@ p a 6 AAA drop 
table t1; create table t1 ( -a varchar(10) primary key +p int primary key, +a varchar(20) character set latin1 collate latin1_swedish_ci not null, +index(a, p) +) engine=ndb; +insert into t1 values (1,'A'),(2,'b '),(3,'C '),(4,'d '),(5,'E'),(6,'f'); +insert into t1 values (7,'a'),(8,'B '),(9,'c '),(10,'D'),(11,'e'),(12,'F '); +select p,a,length(a) from t1 order by a, p; +p a length(a) +1 A 1 +7 a 1 +2 b 2 +8 B 2 +3 C 3 +9 c 3 +4 d 7 +10 D 1 +5 E 1 +11 e 1 +6 f 1 +12 F 3 +select * from t1 where a = 'a ' order by a desc, p desc; +p a +7 a +1 A +select * from t1 where a >= 'D' order by a, p; +p a +4 d +10 D +5 E +11 e +6 f +12 F +select * from t1 where a < 'D' order by a, p; +p a +1 A +7 a +2 b +8 B +3 C +9 c +select count(*) from t1 x, t1 y, t1 z where x.a = y.a and y.a = z.a; +count(*) +48 +drop table t1; +create table t1 ( +a char(5) character set ucs2, +b varchar(7) character set utf8, +primary key(a, b) +) engine=ndb; +insert into t1 values +('a','A '),('B ','b'),('c','C '),('D','d'),('e ','E'),('F','f '), +('A','b '),('b ','C'),('C','d '),('d','E'),('E ','f'), +('a','C '),('B ','d'),('c','E '),('D','f'); +insert into t1 values('d','f'); +ERROR 23000: Duplicate entry '' for key 1 +select a,b,length(a),length(b) from t1 order by a,b limit 3; +a b length(a) length(b) +a A 2 2 +A b 2 2 +a C 2 2 +select a,b,length(a),length(b) from t1 order by a desc, b desc limit 3; +a b length(a) length(b) +F f 2 3 +E f 2 1 +e E 2 1 +select a,b,length(a),length(b) from t1 where a='c' and b='c'; +a b length(a) length(b) +c C 2 5 +drop table t1; +create table t1 ( +a char(10) primary key ) engine=ndb; insert into t1 values ('jonas % '); replace into t1 values ('jonas % '); diff --git a/mysql-test/r/ps_2myisam.result b/mysql-test/r/ps_2myisam.result index 1fbdd737eba..621a7ceeab8 100644 --- a/mysql-test/r/ps_2myisam.result +++ b/mysql-test/r/ps_2myisam.result @@ -3026,7 +3026,7 @@ c1 c13 c14 c15 c16 c17 42 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 43 0000-00-00 
0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 50 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -51 0010-00-00 0010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 +51 2010-00-00 2010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 52 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 53 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 60 NULL NULL 1991-01-01 01:01:01 NULL NULL diff --git a/mysql-test/r/ps_3innodb.result b/mysql-test/r/ps_3innodb.result index e18f7745091..4fc3ce54810 100644 --- a/mysql-test/r/ps_3innodb.result +++ b/mysql-test/r/ps_3innodb.result @@ -3009,7 +3009,7 @@ c1 c13 c14 c15 c16 c17 42 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 43 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 50 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -51 0010-00-00 0010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 +51 2010-00-00 2010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 52 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 53 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 60 NULL NULL 1991-01-01 01:01:01 NULL NULL diff --git a/mysql-test/r/ps_4heap.result b/mysql-test/r/ps_4heap.result index 690bee55848..5025e847f1a 100644 --- a/mysql-test/r/ps_4heap.result +++ b/mysql-test/r/ps_4heap.result @@ -3010,7 +3010,7 @@ c1 c13 c14 c15 c16 c17 42 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 43 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 50 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -51 0010-00-00 0010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 +51 2010-00-00 2010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 52 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 53 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 60 NULL NULL 1991-01-01 01:01:01 NULL NULL diff --git 
a/mysql-test/r/ps_5merge.result b/mysql-test/r/ps_5merge.result index 5c71d1fc53c..3dff34e374f 100644 --- a/mysql-test/r/ps_5merge.result +++ b/mysql-test/r/ps_5merge.result @@ -2946,7 +2946,7 @@ c1 c13 c14 c15 c16 c17 42 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 43 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 50 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -51 0010-00-00 0010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 +51 2010-00-00 2010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 52 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 53 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 60 NULL NULL 1991-01-01 01:01:01 NULL NULL @@ -5955,7 +5955,7 @@ c1 c13 c14 c15 c16 c17 42 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 43 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 50 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -51 0010-00-00 0010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 +51 2010-00-00 2010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 52 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 53 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 60 NULL NULL 1991-01-01 01:01:01 NULL NULL diff --git a/mysql-test/r/ps_6bdb.result b/mysql-test/r/ps_6bdb.result index b6fb52c1120..6817c0000c5 100644 --- a/mysql-test/r/ps_6bdb.result +++ b/mysql-test/r/ps_6bdb.result @@ -3009,7 +3009,7 @@ c1 c13 c14 c15 c16 c17 42 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 43 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 50 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -51 0010-00-00 0010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 +51 2010-00-00 2010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 52 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 53 2001-00-00 2001-00-00 00:00:00 
0000-00-00 00:00:00 838:59:59 0000 60 NULL NULL 1991-01-01 01:01:01 NULL NULL diff --git a/mysql-test/r/ps_7ndb.result b/mysql-test/r/ps_7ndb.result index 8ba900334fc..399b3b5017b 100644 --- a/mysql-test/r/ps_7ndb.result +++ b/mysql-test/r/ps_7ndb.result @@ -3009,7 +3009,7 @@ c1 c13 c14 c15 c16 c17 42 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 43 0000-00-00 0000-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 50 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 -51 0010-00-00 0010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 +51 2010-00-00 2010-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 52 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 53 2001-00-00 2001-00-00 00:00:00 0000-00-00 00:00:00 838:59:59 0000 60 NULL NULL 1991-01-01 01:01:01 NULL NULL diff --git a/mysql-test/r/select.result b/mysql-test/r/select.result index ac0157fcfd1..7d5b2ed18cb 100644 --- a/mysql-test/r/select.result +++ b/mysql-test/r/select.result @@ -1,5 +1,5 @@ drop table if exists t1,t2,t3,t4; -drop table if exists t1_1,t1_2,t9_1,t9_2; +drop table if exists t1_1,t1_2,t9_1,t9_2,t1aa,t2aa; drop view if exists v1; CREATE TABLE t1 ( Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL, diff --git a/mysql-test/r/sp-error.result b/mysql-test/r/sp-error.result index 642f1aedd78..57126162e3f 100644 --- a/mysql-test/r/sp-error.result +++ b/mysql-test/r/sp-error.result @@ -47,6 +47,8 @@ Warnings: Note 1305 PROCEDURE foo does not exist show create procedure foo| ERROR 42000: PROCEDURE foo does not exist +show create function foo| +ERROR 42000: FUNCTION foo does not exist create procedure foo() foo: loop leave bar; diff --git a/mysql-test/r/type_datetime.result b/mysql-test/r/type_datetime.result index 586f74bee20..920c82c3e67 100644 --- a/mysql-test/r/type_datetime.result +++ b/mysql-test/r/type_datetime.result @@ -143,3 +143,13 @@ t 0000-00-00 00:00:00 2003-01-01 00:00:00 drop table t1; +create table t1 (dt datetime); +insert 
into t1 values ("12-00-00"), ("00-00-00 01:00:00"); +insert into t1 values ("00-00-00"), ("00-00-00 00:00:00"); +select * from t1; +dt +2012-00-00 00:00:00 +2000-00-00 01:00:00 +0000-00-00 00:00:00 +0000-00-00 00:00:00 +drop table t1; diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 8013521e2f4..c4391781e9c 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -363,107 +363,6 @@ v4 CREATE ALGORITHM=TEMPTABLE VIEW `mysqltest`.`v4` AS select (`mysqltest`.`t2`. revoke all privileges on mysqltest.* from mysqltest_1@localhost; delete from mysql.user where user='mysqltest_1'; drop database mysqltest; -set GLOBAL query_cache_size=1355776; -flush status; -create table t1 (a int, b int); -create view v1 (c,d) as select sql_no_cache a,b from t1; -create view v2 (c,d) as select a+rand(),b from t1; -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 0 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 0 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 -select * from v1; -c d -select * from v2; -c d -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 0 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 0 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 -select * from v1; -c d -select * from v2; -c d -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 0 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 0 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 -drop view v1,v2; -set query_cache_type=demand; -flush status; -create view v1 (c,d) as select sql_cache a,b from t1; -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 0 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 0 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 
-select * from v1; -c d -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 1 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 1 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 -select * from t1; -a b -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 1 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 1 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 -select * from v1; -c d -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 1 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 1 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 1 -select * from t1; -a b -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 1 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 1 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 1 -drop view v1; -set query_cache_type=default; -drop table t1; -set GLOBAL query_cache_size=default; create table t1 (a int); insert into t1 values (1), (2), (3), (1), (2), (3); create view v1 as select distinct a from t1; @@ -1433,7 +1332,8 @@ insert into v1 values (1) on duplicate key update a=2; insert into v1 values (1) on duplicate key update a=2; ERROR HY000: CHECK OPTION failed 'test.v1' insert ignore into v1 values (1) on duplicate key update a=2; -ERROR HY000: CHECK OPTION failed 'test.v1' +Warnings: +Error 1369 CHECK OPTION failed 'test.v1' select * from t1; a 1 diff --git a/mysql-test/r/view_query_cache.result b/mysql-test/r/view_query_cache.result new file mode 100644 index 00000000000..f46f0f609cd --- /dev/null +++ b/mysql-test/r/view_query_cache.result @@ -0,0 +1,101 @@ +set GLOBAL query_cache_size=1355776; +flush status; +create table t1 (a int, b int); +create view v1 (c,d) as select sql_no_cache a,b from t1; +create view v2 
(c,d) as select a+rand(),b from t1; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 0 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from v1; +c d +select * from v2; +c d +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 0 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from v1; +c d +select * from v2; +c d +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 0 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +drop view v1,v2; +set query_cache_type=demand; +flush status; +create view v1 (c,d) as select sql_cache a,b from t1; +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 0 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 0 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from v1; +c d +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from t1; +a b +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +select * from v1; +c d +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +select * from t1; 
+a b +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 1 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 1 +drop view v1; +set query_cache_type=default; +drop table t1; +set GLOBAL query_cache_size=default; diff --git a/mysql-test/t/cast.test b/mysql-test/t/cast.test index e5681dedbac..23bba7d5aff 100644 --- a/mysql-test/t/cast.test +++ b/mysql-test/t/cast.test @@ -108,3 +108,13 @@ SELECT a, CAST(a AS CHAR(3)) FROM t1 ORDER BY CAST(a AS CHAR(2)), a; SELECT a, CAST(a AS UNSIGNED) FROM t1 ORDER BY CAST(a AS CHAR) ; SELECT a, CAST(a AS CHAR(2)) FROM t1 ORDER BY CAST(a AS CHAR(3)), a; DROP TABLE t1; + +# +# Test for bug #6914 "Problems using time()/date() output in expressions". +# When we are casting datetime value to DATE/TIME we should throw away +# time/date parts (correspondingly). +# +select date_add(cast('2004-12-30 12:00:00' as date), interval 0 hour); +select timediff(cast('2004-12-30 12:00:00' as time), '12:00:00'); +# Still we should not throw away "days" part of time value +select timediff(cast('1 12:00:00' as time), '12:00:00'); diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test index 6f222eedec1..686bf5a97ad 100644 --- a/mysql-test/t/create.test +++ b/mysql-test/t/create.test @@ -273,8 +273,8 @@ create table t3 like t1; show create table t3; select * from t3; # Disable PS becasue of @@warning_count ---disable_ps_protocol create table if not exists t3 like t1; +--disable_ps_protocol select @@warning_count; --enable_ps_protocol create temporary table t3 like t2; diff --git a/mysql-test/t/delayed.test b/mysql-test/t/delayed.test index 40bd7a912f3..513de990165 100644 --- a/mysql-test/t/delayed.test +++ b/mysql-test/t/delayed.test @@ -31,6 +31,8 @@ insert delayed into t1 values (null,"c"); insert delayed into t1 values (3,"d"),(null,"e"); --error 1136 insert delayed into t1 values (3,"this will give an","error"); 
---sleep 2 +# 2 was not enough for --ps-protocol +--sleep 4 +show status like 'not_flushed_delayed_rows'; select * from t1; drop table t1; diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index 9a06d07149e..65ef9f2535c 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -32,9 +32,7 @@ create table t2 (grp int, a bigint unsigned, c char(10)); insert into t2 select grp,max(a)+max(grp),max(c) from t1 group by grp; # REPLACE ... SELECT doesn't yet work with PS ---disable_ps_protocol replace into t2 select grp, a, c from t1 limit 2,1; ---enable_ps_protocol select * from t2; drop table t1,t2; diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index b6240054e0a..80ddb205110 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -231,10 +231,14 @@ select unix_timestamp('1969-12-01 19:00:01'); # # Test for bug #6439 "unix_timestamp() function returns wrong datetime -# values for too big argument". It should return error instead. +# values for too big argument" and bug #7515 "from_unixtime(0) now +# returns NULL instead of the epoch". unix_timestamp() should return error +# for too big or negative argument. It should return Epoch value for zero +# argument since it seems that many user's rely on this fact. 
# -select from_unixtime(0); +select from_unixtime(-1); select from_unixtime(2145916800); +select from_unixtime(0); # # Test types from + INTERVAL @@ -267,7 +271,10 @@ select date_add(date,INTERVAL "1" QUARTER) from t1; select timestampadd(MINUTE, 1, date) from t1; select timestampadd(WEEK, 1, date) from t1; select timestampadd(SQL_TSI_SECOND, 1, date) from t1; +# Prepared statements doesn't support FRAC_SECOND yet +--disable_ps_protocol select timestampadd(SQL_TSI_FRAC_SECOND, 1, date) from t1; +--enable_ps_protocol select timestampdiff(MONTH, '2001-02-01', '2001-05-01') as a; select timestampdiff(YEAR, '2002-05-01', '2001-01-01') as a; diff --git a/mysql-test/t/group_min_max.test b/mysql-test/t/group_min_max.test index 17a6cc88597..b42125566d5 100644 --- a/mysql-test/t/group_min_max.test +++ b/mysql-test/t/group_min_max.test @@ -174,6 +174,7 @@ explain select a1, max(a2) from t1 group by a1; explain select a1, min(a2), max(a2) from t1 group by a1; explain select a1, a2, b, min(c), max(c) from t1 group by a1,a2,b; explain select a1,a2,b,max(c),min(c) from t1 group by a1,a2,b; +--replace_column 7 # explain select a1,a2,b,max(c),min(c) from t2 group by a1,a2,b; -- Select fields in different order explain select min(a2), a1, max(a2), min(a2), a1 from t1 group by a1; diff --git a/mysql-test/t/having.test b/mysql-test/t/having.test index b0fc600030b..3221b0d4624 100644 --- a/mysql-test/t/having.test +++ b/mysql-test/t/having.test @@ -280,7 +280,11 @@ insert into t1 values (1),(2),(3); select count(*) from t1 group by s1 having s1 is null; +# prepared statements prints warnings too early +--disable_ps_protocol select s1*0 as s1 from t1 group by s1 having s1 <> 0; +--enable_ps_protocol + # ANSI requires: 3 rows # MySQL returns: 0 rows - because of GROUP BY name resolution diff --git a/mysql-test/t/insert_select.test b/mysql-test/t/insert_select.test index e1459310bb9..15509b06679 100644 --- a/mysql-test/t/insert_select.test +++ b/mysql-test/t/insert_select.test @@ -136,9 
+136,7 @@ insert into t2 values (2,"t2:2"), (3,"t2:3"); insert into t1 select * from t2; select * from t1; # REPLACE .. SELECT is not yet supported by PS ---disable_ps_protocol replace into t1 select * from t2; ---enable_ps_protocol select * from t1; drop table t1,t2; diff --git a/mysql-test/t/multi_update.test b/mysql-test/t/multi_update.test index de66218c4a6..f3b6216e3cf 100644 --- a/mysql-test/t/multi_update.test +++ b/mysql-test/t/multi_update.test @@ -6,9 +6,9 @@ drop table if exists t1,t2,t3; drop database if exists mysqltest; drop view if exists v1; ---error 0,1141 +--error 0,1141,1147 revoke all privileges on mysqltest.t1 from mysqltest_1@localhost; ---error 0,1141 +--error 0,1141,1147 revoke all privileges on mysqltest.* from mysqltest_1@localhost; delete from mysql.user where user=_binary'mysqltest_1'; --enable_warnings @@ -452,11 +452,8 @@ drop table t1, t2; # # Test for bug #1980. # -set @ttype_save=@@storage_engine; - --disable_warnings -set @@storage_engine=innodb; -create table t1 ( c char(8) not null ); +create table t1 ( c char(8) not null ) engine=innodb; --enable_warnings insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'); @@ -475,8 +472,7 @@ delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b; drop table t1,t2; --disable_warnings -set @@storage_engine=bdb; -create table t1 ( c char(8) not null ); +create table t1 ( c char(8) not null ) engine=bdb; --enable_warnings insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'); @@ -492,7 +488,6 @@ insert into t2 select * from t1; delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b; -set @@storage_engine=@ttype_save; drop table t1,t2; create table t1 (a int, b int); diff --git a/mysql-test/t/ndb_charset.test b/mysql-test/t/ndb_charset.test index 1b9e7e8bfcc..ab2bbcc3ad8 100644 --- a/mysql-test/t/ndb_charset.test +++ b/mysql-test/t/ndb_charset.test @@ -53,6 +53,25 @@ select * from t1 where a = 'AaA'; select * from t1 where a = 'AAA'; drop table 
t1; +# pk - varchar + +create table t1 ( + a varchar(20) character set latin1 collate latin1_swedish_ci primary key +) engine=ndb; +# +insert into t1 values ('A'),('b '),('C '),('d '),('E'),('f'); +-- error 1062 +insert into t1 values('b'); +-- error 1062 +insert into t1 values('a '); +# +select a,length(a) from t1 order by a; +select a,length(a) from t1 order by a desc; +select * from t1 where a = 'a'; +select * from t1 where a = 'a '; +select * from t1 where a = 'd'; +drop table t1; + # unique hash index - binary create table t1 ( @@ -102,6 +121,27 @@ select * from t1 where a = 'AaA'; select * from t1 where a = 'AAA'; drop table t1; +# unique hash index - varchar + +create table t1 ( + p int primary key, + a varchar(20) character set latin1 collate latin1_swedish_ci not null, + unique key(a) +) engine=ndb; +# +insert into t1 values (1,'A'),(2,'b '),(3,'C '),(4,'d '),(5,'E'),(6,'f'); +-- error 1062 +insert into t1 values(99,'b'); +-- error 1062 +insert into t1 values(99,'a '); +# +select a,length(a) from t1 order by a; +select a,length(a) from t1 order by a desc; +select * from t1 where a = 'a'; +select * from t1 where a = 'a '; +select * from t1 where a = 'd'; +drop table t1; + # ordered index - binary create table t1 ( @@ -158,9 +198,47 @@ select * from t1 where a = 'AaA' order by p; select * from t1 where a = 'AAA' order by p; drop table t1; +# ordered index - varchar + +create table t1 ( + p int primary key, + a varchar(20) character set latin1 collate latin1_swedish_ci not null, + index(a, p) +) engine=ndb; +# +insert into t1 values (1,'A'),(2,'b '),(3,'C '),(4,'d '),(5,'E'),(6,'f'); +insert into t1 values (7,'a'),(8,'B '),(9,'c '),(10,'D'),(11,'e'),(12,'F '); +select p,a,length(a) from t1 order by a, p; +select * from t1 where a = 'a ' order by a desc, p desc; +select * from t1 where a >= 'D' order by a, p; +select * from t1 where a < 'D' order by a, p; +# +select count(*) from t1 x, t1 y, t1 z where x.a = y.a and y.a = z.a; +drop table t1; + +# minimal 
multi-byte test + +create table t1 ( + a char(5) character set ucs2, + b varchar(7) character set utf8, + primary key(a, b) +) engine=ndb; +# +insert into t1 values + ('a','A '),('B ','b'),('c','C '),('D','d'),('e ','E'),('F','f '), + ('A','b '),('b ','C'),('C','d '),('d','E'),('E ','f'), + ('a','C '),('B ','d'),('c','E '),('D','f'); +-- error 1062 +insert into t1 values('d','f'); +# +select a,b,length(a),length(b) from t1 order by a,b limit 3; +select a,b,length(a),length(b) from t1 order by a desc, b desc limit 3; +select a,b,length(a),length(b) from t1 where a='c' and b='c'; +drop table t1; + # bug create table t1 ( - a varchar(10) primary key + a char(10) primary key ) engine=ndb; insert into t1 values ('jonas % '); replace into t1 values ('jonas % '); diff --git a/mysql-test/t/select.test b/mysql-test/t/select.test index 85a5f6d0ee1..47b115cf030 100644 --- a/mysql-test/t/select.test +++ b/mysql-test/t/select.test @@ -1789,7 +1789,10 @@ CREATE TABLE t1 (gvid int(10) unsigned default NULL, hmid int(10) unsigned defa INSERT INTO t1 VALUES (200001,2,1,1,100,1,1,1,0,0,0,1,0,1,20020425060057,'\\\\ARKIVIO-TESTPDC\\E$',''),(200002,2,2,1,101,1,1,1,0,0,0,1,0,1,20020425060057,'\\\\ARKIVIO-TESTPDC\\C$',''),(200003,1,3,2,NULL,NULL,NULL,NULL,NULL,NULL,NULL,1,0,1,20020425060427,'c:',NULL); CREATE TABLE t2 ( hmid int(10) unsigned default NULL, volid int(10) unsigned default NULL, sampletid smallint(5) unsigned default NULL, sampletime datetime default NULL, samplevalue bigint(20) unsigned default NULL, KEY idx1 (hmid,volid,sampletid,sampletime)) ENGINE=MyISAM; INSERT INTO t2 VALUES (1,3,10,'2002-06-01 08:00:00',35),(1,3,1010,'2002-06-01 12:00:01',35); +# Disable PS becasue we get more warnings from PS than from normal execution +--disable_ps_protocol SELECT a.gvid, (SUM(CASE b.sampletid WHEN 140 THEN b.samplevalue ELSE 0 END)) as the_success,(SUM(CASE b.sampletid WHEN 141 THEN b.samplevalue ELSE 0 END)) as the_fail,(SUM(CASE b.sampletid WHEN 142 THEN b.samplevalue ELSE 0 
END)) as the_size,(SUM(CASE b.sampletid WHEN 143 THEN b.samplevalue ELSE 0 END)) as the_time FROM t1 a, t2 b WHERE a.hmid = b.hmid AND a.volid = b.volid AND b.sampletime >= 'wrong-date-value' AND b.sampletime < 'wrong-date-value' AND b.sampletid IN (140, 141, 142, 143) GROUP BY a.gvid; +--enable_ps_protocol # Testing the same select with NULL's instead of invalid datetime values SELECT a.gvid, (SUM(CASE b.sampletid WHEN 140 THEN b.samplevalue ELSE 0 END)) as the_success,(SUM(CASE b.sampletid WHEN 141 THEN b.samplevalue ELSE 0 END)) as the_fail,(SUM(CASE b.sampletid WHEN 142 THEN b.samplevalue ELSE 0 END)) as the_size,(SUM(CASE b.sampletid WHEN 143 THEN b.samplevalue ELSE 0 END)) as the_time FROM t1 a, t2 b WHERE a.hmid = b.hmid AND a.volid = b.volid AND b.sampletime >= NULL AND b.sampletime < NULL AND b.sampletid IN (140, 141, 142, 143) GROUP BY a.gvid; DROP TABLE t1,t2; diff --git a/mysql-test/t/sp-error.test b/mysql-test/t/sp-error.test index c24f9df16be..b0d7ca60f27 100644 --- a/mysql-test/t/sp-error.test +++ b/mysql-test/t/sp-error.test @@ -81,6 +81,8 @@ call foo()| drop procedure if exists foo| --error 1305 show create procedure foo| +--error 1305 +show create function foo| # LEAVE/ITERATE/GOTO with no match --error 1308 diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index 5f912622101..4f556e34d51 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -944,7 +944,7 @@ select fun(2.3, 3, 5)| insert into t2 values (append("xxx", "yyy"), mul(4,3), e())| insert into t2 values (append("a", "b"), mul(2,mul(3,4)), fun(1.7, 4, 6))| -# These don't work yet. 
+# Disable PS because double's give a bit different values --disable_ps_protocol select * from t2 where s = append("a", "b")| select * from t2 where i = mul(4,3) or i = mul(mul(3,4),2)| diff --git a/mysql-test/t/system_mysql_db_fix.test b/mysql-test/t/system_mysql_db_fix.test index 56f291ae69d..2cefa167466 100644 --- a/mysql-test/t/system_mysql_db_fix.test +++ b/mysql-test/t/system_mysql_db_fix.test @@ -1,6 +1,14 @@ # # This is the test for mysql_fix_privilege_tables # +# Note: If this test fails, don't be confused about the errors reported +# by mysql-test-run; This shows warnings from generated by +# mysql_fix_system_tables which should be ignored. +# Instead, concentrate on the errors in r/system_mysql_db.reject + +--disable_warnings +drop table if exists t1,t1aa,t2aa; +--enable_warnings -- disable_result_log -- disable_query_log diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test index 3900e59c2ee..bf75f09d553 100644 --- a/mysql-test/t/trigger.test +++ b/mysql-test/t/trigger.test @@ -53,6 +53,7 @@ select @a; drop trigger t1.trg; drop table t1; +# PS doesn't work with multi-row statements --disable_ps_protocol # Before update trigger # (In future we will achieve this via proper error handling in triggers) diff --git a/mysql-test/t/type_blob.test b/mysql-test/t/type_blob.test index 085cad8cffa..20a501bb5ed 100644 --- a/mysql-test/t/type_blob.test +++ b/mysql-test/t/type_blob.test @@ -17,10 +17,8 @@ drop table if exists t1,t2,t3,t4,t5,t6,t7; CREATE TABLE t1 (a blob, b text, c blob(250), d text(70000), e text(70000000)); show columns from t1; # PS doesn't give errors on prepare yet ---disable_ps_protocol CREATE TABLE t2 (a char(255), b varbinary(70000), c varchar(70000000)); CREATE TABLE t4 (c varchar(65530) character set utf8 not null); ---enable_ps_protocol show columns from t2; create table t3 (a long, b long byte); show create TABLE t3; diff --git a/mysql-test/t/type_datetime.test b/mysql-test/t/type_datetime.test index 04e4a73554a..a7eb78cb292 
100644 --- a/mysql-test/t/type_datetime.test +++ b/mysql-test/t/type_datetime.test @@ -89,3 +89,15 @@ delete from t1; insert into t1 values ("0000-00-00 00:00:00 some trailer"),("2003-01-01 00:00:00 some trailer"); select * from t1; drop table t1; + +# +# Test for bug #7297 "Two digit year should be interpreted correctly even +# with zero month and day" +# +create table t1 (dt datetime); +# These dates should be treated as dates in 21st century +insert into t1 values ("12-00-00"), ("00-00-00 01:00:00"); +# Zero dates are still special :/ +insert into t1 values ("00-00-00"), ("00-00-00 00:00:00"); +select * from t1; +drop table t1; diff --git a/mysql-test/t/union.test b/mysql-test/t/union.test index 468a88b83db..58e4c22e168 100644 --- a/mysql-test/t/union.test +++ b/mysql-test/t/union.test @@ -5,9 +5,6 @@ --disable_warnings drop table if exists t1,t2,t3,t4,t5,t6; --enable_warnings -# PS doesn't work correctly with found_rows: to be fixed ---disable_ps_protocol - CREATE TABLE t1 (a int not null, b char (10) not null); insert into t1 values(1,'a'),(2,'b'),(3,'c'),(3,'c'); @@ -30,9 +27,12 @@ select 't1',b,count(*) from t1 group by b UNION select 't2',b,count(*) from t2 g (select a,b from t1 limit 2) union all (select a,b from t2 order by a limit 1) order by t1.b; explain extended (select a,b from t1 limit 2) union all (select a,b from t2 order by a limit 1) order by b desc; (select sql_calc_found_rows a,b from t1 limit 2) union all (select a,b from t2 order by a) limit 2; +# PS doesn't work correctly with found_rows: to be fixed +--disable_ps_protocol select found_rows(); select sql_calc_found_rows a,b from t1 union all select a,b from t2 limit 2; select found_rows(); +--enable_ps_protocol # # Test some error conditions with UNION @@ -210,15 +210,27 @@ insert into t2 values (3),(4),(5); # Test global limits (SELECT SQL_CALC_FOUND_ROWS * FROM t1) UNION all (SELECT * FROM t2) LIMIT 1; +# PS doesn't work correctly with found_rows: to be fixed +--disable_ps_protocol select 
found_rows(); +--enable_ps_protocol (SELECT SQL_CALC_FOUND_ROWS * FROM t1 LIMIT 1) UNION all (SELECT * FROM t2) LIMIT 2; +# PS doesn't work correctly with found_rows: to be fixed +--disable_ps_protocol select found_rows(); +--enable_ps_protocol # Test cases where found_rows() should return number of returned rows (SELECT SQL_CALC_FOUND_ROWS * FROM t1 LIMIT 1) UNION all (SELECT * FROM t2); +# PS doesn't work correctly with found_rows: to be fixed +--disable_ps_protocol select found_rows(); +--enable_ps_protocol (SELECT SQL_CALC_FOUND_ROWS * FROM t1) UNION all (SELECT * FROM t2 LIMIT 1); +# PS doesn't work correctly with found_rows: to be fixed +--disable_ps_protocol select found_rows(); +--enable_ps_protocol # This used to work in 4.0 but not anymore in 4.1 --error 1064 (SELECT SQL_CALC_FOUND_ROWS * FROM t1 LIMIT 1) UNION SELECT * FROM t2 LIMIT 1; @@ -226,9 +238,15 @@ select found_rows(); # In these case found_rows() should work SELECT SQL_CALC_FOUND_ROWS * FROM t1 LIMIT 1 UNION all SELECT * FROM t2 LIMIT 2; +# PS doesn't work correctly with found_rows: to be fixed +--disable_ps_protocol select found_rows(); +--disable_ps_protocol SELECT SQL_CALC_FOUND_ROWS * FROM t1 UNION all SELECT * FROM t2 LIMIT 2; +# PS doesn't work correctly with found_rows: to be fixed +--disable_ps_protocol select found_rows(); +--disable_ps_protocol # The following examples will not be exact SELECT SQL_CALC_FOUND_ROWS * FROM t1 UNION SELECT * FROM t2 LIMIT 2; diff --git a/mysql-test/t/user_limits.test b/mysql-test/t/user_limits.test index 8c6d5453789..50c16e5e114 100644 --- a/mysql-test/t/user_limits.test +++ b/mysql-test/t/user_limits.test @@ -14,6 +14,9 @@ delete from mysql.tables_priv where user like 'mysqltest\_%'; delete from mysql.columns_priv where user like 'mysqltest\_%'; flush privileges; +# Limits doesn't work with prepared statements (yet) +--disable_ps_protocol + # Test of MAX_QUERIES_PER_HOUR limit grant usage on *.* to mysqltest_1@localhost with max_queries_per_hour 2; 
connect (mqph, localhost, mysqltest_1,,); @@ -149,6 +152,7 @@ disconnect muca2; disconnect muca3; set global max_user_connections= 0; drop user mysqltest_1@localhost; +--enable_ps_protocol # Final cleanup drop table t1; diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index 6dc49b28426..77f0f65323e 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -328,63 +328,6 @@ revoke all privileges on mysqltest.* from mysqltest_1@localhost; delete from mysql.user where user='mysqltest_1'; drop database mysqltest; -# -# QUERY CACHE options for VIEWs -# -set GLOBAL query_cache_size=1355776; -flush status; -create table t1 (a int, b int); - -# queries with following views should not be in query cache -create view v1 (c,d) as select sql_no_cache a,b from t1; -create view v2 (c,d) as select a+rand(),b from t1; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; -select * from v1; -select * from v2; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; -select * from v1; -select * from v2; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; - -drop view v1,v2; - -# SQL_CACHE option -set query_cache_type=demand; -flush status; -# query with view will be cached, but direct acess to table will not -create view v1 (c,d) as select sql_cache a,b from t1; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; -select * from v1; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; -select * from t1; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; -select * from v1; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; -select * from t1; 
-show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; -drop view v1; -set query_cache_type=default; - -drop table t1; -set GLOBAL query_cache_size=default; - - # # DISTINCT option for VIEW # @@ -1394,7 +1337,6 @@ create view v1 as select * from t1 where a < 2 with check option; insert into v1 values (1) on duplicate key update a=2; -- error 1369 insert into v1 values (1) on duplicate key update a=2; --- error 1369 insert ignore into v1 values (1) on duplicate key update a=2; select * from t1; drop view v1; diff --git a/mysql-test/t/view_query_cache.test b/mysql-test/t/view_query_cache.test new file mode 100644 index 00000000000..dd9c8006915 --- /dev/null +++ b/mysql-test/t/view_query_cache.test @@ -0,0 +1,58 @@ +-- source include/have_query_cache.inc +# +# QUERY CACHE options for VIEWs +# +set GLOBAL query_cache_size=1355776; +flush status; +create table t1 (a int, b int); + +# queries with following views should not be in query cache +create view v1 (c,d) as select sql_no_cache a,b from t1; +create view v2 (c,d) as select a+rand(),b from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from v1; +select * from v2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from v1; +select * from v2; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; + +drop view v1,v2; + +# SQL_CACHE option +set query_cache_type=demand; +flush status; +# query with view will be cached, but direct acess to table will not +create view v1 (c,d) as select sql_cache a,b from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from v1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; 
+select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from v1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +select * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +drop view v1; +set query_cache_type=default; + +drop table t1; +set GLOBAL query_cache_size=default; + + diff --git a/mysql-test/t/warnings.test b/mysql-test/t/warnings.test index 4bd659606f6..67162e7f84b 100644 --- a/mysql-test/t/warnings.test +++ b/mysql-test/t/warnings.test @@ -26,9 +26,8 @@ show warnings limit 1; drop database if exists not_exists_db; show count(*) warnings; create table t1(id int); -# PS doesn't give warnings on prepare ---disable_ps_protocol create table if not exists t1(id int); +--disable_ps_protocol select @@warning_count; --enable_ps_protocol drop table t1; @@ -96,12 +95,9 @@ drop table t1; # Test for deprecated TYPE= syntax # -# PS doesn't give warnings on prepare ---disable_ps_protocol create table t1 (id int) type=heap; alter table t1 type=myisam; drop table t1; ---enable_ps_protocol # # Test for deprecated table_type variable diff --git a/mysys/Makefile.am b/mysys/Makefile.am index b0ca1b402ee..4698d06d8c3 100644 --- a/mysys/Makefile.am +++ b/mysys/Makefile.am @@ -66,6 +66,7 @@ DEFS = -DDEFAULT_BASEDIR=\"$(prefix)\" \ -DDATADIR="\"$(MYSQLDATAdir)\"" \ -DDEFAULT_CHARSET_HOME="\"$(MYSQLBASEdir)\"" \ -DSHAREDIR="\"$(MYSQLSHAREdir)\"" \ + -DDEFAULT_HOME_ENV=MYSQL_HOME \ @DEFS@ libmysys_a_DEPENDENCIES= @THREAD_LOBJECTS@ diff --git a/mysys/default.c b/mysys/default.c index d6d84f65d8b..3de134d936f 100644 --- a/mysys/default.c +++ b/mysys/default.c @@ -45,23 +45,8 @@ char *defaults_extra_file=0; /* Which directories are searched for options (and in which order) */ -const char *default_directories[]= { -#ifdef __WIN__ -"C:/", -#elif 
defined(__NETWARE__) -"sys:/etc/", -#else -"/etc/", -#endif -#ifdef DATADIR -DATADIR, -#endif -"", /* Place for defaults_extra_dir */ -#if !defined(__WIN__) && !defined(__NETWARE__) -"~/", -#endif -NullS, -}; +#define MAX_DEFAULT_DIRS 4 +const char *default_directories[MAX_DEFAULT_DIRS + 1]; #ifdef __WIN__ static const char *f_extensions[]= { ".ini", ".cnf", 0 }; @@ -89,6 +74,7 @@ static int search_default_file_with_ext(Process_option_func func, void *func_ctx, const char *dir, const char *ext, const char *config_file); +static void init_default_directories(); static char *remove_end_comment(char *ptr); @@ -319,6 +305,7 @@ int load_defaults(const char *conf_file, const char **groups, struct handle_option_ctx ctx; DBUG_ENTER("load_defaults"); + init_default_directories(); init_alloc_root(&alloc,512,0); if (*argc >= 2 && !strcmp(argv[0][1],"--no-defaults")) { @@ -652,6 +639,7 @@ void print_defaults(const char *conf_file, const char **groups) char name[FN_REFLEN], **ext; const char **dirs; + init_default_directories(); puts("\nDefault options are read from the following files in the given order:"); if (dirname_length(conf_file)) @@ -714,3 +702,23 @@ void print_defaults(const char *conf_file, const char **groups) } #include + +static void init_default_directories() +{ + const char *env, **ptr= default_directories; + +#ifdef __WIN__ + *ptr++= "C:/"; +#elif defined(__NETWARE__) + *ptr++= "sys:/etc/"; +#else + *ptr++= "/etc/"; +#endif + if ((env= getenv(STRINGIFY_ARG(DEFAULT_HOME_ENV)))) + *ptr++= env; + *ptr++= ""; /* Place for defaults_extra_file */ +#if !defined(__WIN__) && !defined(__NETWARE__) + *ptr++= "~/";; +#endif + *ptr= 0; /* end marker */ +} diff --git a/mysys/mf_keycaches.c b/mysys/mf_keycaches.c index 8bf203e249f..fee3096de52 100644 --- a/mysys/mf_keycaches.c +++ b/mysys/mf_keycaches.c @@ -235,7 +235,7 @@ static my_bool safe_hash_set(SAFE_HASH *hash, const byte *key, uint length, if (my_hash_insert(&hash->hash, (byte*) entry)) { /* This can only happen if 
hash got out of memory */ - my_delete((char*) entry, MYF(0)); + my_free((char*) entry, MYF(0)); error= 1; goto end; } diff --git a/mysys/my_sleep.c b/mysys/my_sleep.c index 3de2d2abd13..31eaf7eeb96 100644 --- a/mysys/my_sleep.c +++ b/mysys/my_sleep.c @@ -23,6 +23,8 @@ void my_sleep(ulong m_seconds) { #ifdef __NETWARE__ delay(m_seconds/1000+1); +#elif defined(__WIN__) + Sleep(m_seconds/1000+1); /* Sleep() has millisecond arg */ #elif defined(OS2) DosSleep(m_seconds/1000+1); #elif defined(HAVE_SELECT) diff --git a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp b/ndb/examples/ndbapi_async_example/ndbapi_async.cpp index 9fd37f3a670..c11b6e849e4 100644 --- a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp +++ b/ndb/examples/ndbapi_async_example/ndbapi_async.cpp @@ -381,7 +381,7 @@ int populate(Ndb * myNdb, int data, async_callback_t * cbData) } /*Prepare transaction (the transaction is NOT yet sent to NDB)*/ - transaction[current].conn->executeAsynchPrepare(Commit, + transaction[current].conn->executeAsynchPrepare(NdbTransaction::Commit, &callback, cb); /** diff --git a/ndb/examples/ndbapi_async_example1/ndbapi_async1.cpp b/ndb/examples/ndbapi_async_example1/ndbapi_async1.cpp index 9af9c72c260..fb806a5bbb5 100644 --- a/ndb/examples/ndbapi_async_example1/ndbapi_async1.cpp +++ b/ndb/examples/ndbapi_async_example1/ndbapi_async1.cpp @@ -99,7 +99,8 @@ int main() myNdbOperation->setValue("ATTR2", 20 + i); // Prepare transaction (the transaction is NOT yet sent to NDB) - myNdbTransaction[i]->executeAsynchPrepare(Commit, &callback, NULL); + myNdbTransaction[i]->executeAsynchPrepare(NdbTransaction::Commit, + &callback, NULL); } // Send all transactions to NDB diff --git a/ndb/examples/ndbapi_event_example/ndbapi_event.cpp b/ndb/examples/ndbapi_event_example/ndbapi_event.cpp index 82e39e32d13..f03564744c7 100644 --- a/ndb/examples/ndbapi_event_example/ndbapi_event.cpp +++ b/ndb/examples/ndbapi_event_example/ndbapi_event.cpp @@ -36,7 +36,7 @@ * * NdbDictionary::Event * 
setTable() - * addtableEvent() + * addTableEvent() * addEventColumn() * * NdbEventOperation @@ -63,12 +63,12 @@ * another process (e.g. flexBench -l 0 -stdtables). * We want to monitor what happens with columns COL0, COL2, COL11 * - * or together with the mysqlcluster client; + * or together with the mysql client; * - * shell> mysqlcluster -u root + * shell> mysql -u root * mysql> create database TEST_DB; * mysql> use TEST_DB; - * mysql> create table TAB0 (COL0 int primary key, COL1 int, COL11 int); + * mysql> create table TAB0 (COL0 int primary key, COL1 int, COL11 int) engine=ndb; * * In another window start ndbapi_example5, wait until properly started * @@ -140,6 +140,7 @@ int main() eventTableName, eventColumnName, noEventColumnName); + int j= 0; while (j < 5) { @@ -160,10 +161,9 @@ int main() // set up the callbacks printf("execute\n"); - if (op->execute()) { // This starts changes to "start flowing" - printf("operation execution failed\n"); - exit(-1); - } + // This starts changes to "start flowing" + if (op->execute()) + APIERROR(op->getNdbError()); int i= 0; while(i < 40) { @@ -199,7 +199,7 @@ int main() printf("NULL"); } if (recAttrPre[i]->isNULL() >= 0) { // we have a value - printf(" post[%u]=", i); + printf(" pre[%u]=", i); if (recAttrPre[i]->isNULL() == 0) // we have a non-null value printf("%u", recAttrPre[i]->u_32_value()); else // we have a null value @@ -212,7 +212,7 @@ int main() ;//printf("timed out\n"); } // don't want to listen to events anymore - myNdb->dropEventOperation(op); + if (myNdb->dropEventOperation(op)) APIERROR(myNdb->getNdbError()); j++; } @@ -220,7 +220,8 @@ int main() { NdbDictionary::Dictionary *myDict = myNdb->getDictionary(); if (!myDict) APIERROR(myNdb->getNdbError()); - myDict->dropEvent(eventName); // remove event from database + // remove event from database + if (myDict->dropEvent(eventName)) APIERROR(myDict->getNdbError()); } delete myNdb; @@ -232,37 +233,36 @@ int main() int myCreateEvent(Ndb* myNdb, const char 
*eventName, const char *eventTableName, - const char **eventColumnName, - const int noEventColumnName) + const char **eventColumnNames, + const int noEventColumnNames) { NdbDictionary::Dictionary *myDict= myNdb->getDictionary(); if (!myDict) APIERROR(myNdb->getNdbError()); - NdbDictionary::Event myEvent(eventName); - myEvent.setTable(eventTableName); + const NdbDictionary::Table *table= myDict->getTable(eventTableName); + if (!table) APIERROR(myDict->getNdbError()); + + NdbDictionary::Event myEvent(eventName, *table); myEvent.addTableEvent(NdbDictionary::Event::TE_ALL); // myEvent.addTableEvent(NdbDictionary::Event::TE_INSERT); // myEvent.addTableEvent(NdbDictionary::Event::TE_UPDATE); // myEvent.addTableEvent(NdbDictionary::Event::TE_DELETE); - for (int i = 0; i < noEventColumnName; i++) - myEvent.addEventColumn(eventColumnName[i]); + myEvent.addEventColumns(noEventColumnNames, eventColumnNames); - int res = myDict->createEvent(myEvent); // Add event to database - - if (res == 0) + // Add event to database + if (myDict->createEvent(myEvent) == 0) myEvent.print(); - else { - printf("Event creation failed\n"); - printf("trying drop Event, maybe event exists\n"); - res = myDict->dropEvent(eventName); - if (res) - exit(-1); + else if (myDict->getNdbError().classification == + NdbError::SchemaObjectExists) { + printf("Event creation failed, event exists\n"); + printf("dropping Event...\n"); + if (myDict->dropEvent(eventName)) APIERROR(myDict->getNdbError()); // try again - res = myDict->createEvent(myEvent); // Add event to database - if (res) - exit(-1); - } + // Add event to database + if ( myDict->createEvent(myEvent)) APIERROR(myDict->getNdbError()); + } else + APIERROR(myDict->getNdbError()); - return res; + return 0; } diff --git a/ndb/examples/ndbapi_retries_example/ndbapi_retries.cpp b/ndb/examples/ndbapi_retries_example/ndbapi_retries.cpp index e832964ee94..d14e0cf4553 100644 --- a/ndb/examples/ndbapi_retries_example/ndbapi_retries.cpp +++ 
b/ndb/examples/ndbapi_retries_example/ndbapi_retries.cpp @@ -104,7 +104,7 @@ int insert(int transactionId, NdbTransaction* myTransaction) { exit(-1); } - return myTransaction->execute(NoCommit); + return myTransaction->execute(NdbTransaction::NoCommit); } @@ -131,7 +131,7 @@ int executeInsertTransaction(int transactionId, Ndb* myNdb) { result = -1; // Failure } else if (insert(transactionId, myTransaction) || insert(10000+transactionId, myTransaction) || - myTransaction->execute(Commit)) { + myTransaction->execute(NdbTransaction::Commit)) { TRANSERROR(myTransaction); ndberror = myTransaction->getNdbError(); result = -1; // Failure diff --git a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp b/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp index e886fc08d46..183dd69f648 100644 --- a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp +++ b/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp @@ -187,7 +187,7 @@ int populate(Ndb * myNdb) myNdbOperation->setValue("COLOR", cars[i].color); } - int check = myTrans->execute(Commit); + int check = myTrans->execute(NdbTransaction::Commit); myTrans->close(); @@ -280,7 +280,7 @@ int scan_delete(Ndb* myNdb, /** * Start scan (NoCommit since we are only reading at this stage); */ - if(myTrans->execute(NoCommit) != 0){ + if(myTrans->execute(NdbTransaction::NoCommit) != 0){ err = myTrans->getNdbError(); if(err.status == NdbError::TemporaryError){ std::cout << myTrans->getNdbError().message << std::endl; @@ -322,7 +322,7 @@ int scan_delete(Ndb* myNdb, */ if(check != -1) { - check = myTrans->execute(Commit); + check = myTrans->execute(NdbTransaction::Commit); } if(check == -1) @@ -453,7 +453,7 @@ int scan_update(Ndb* myNdb, /** * Start scan (NoCommit since we are only reading at this stage); */ - if(myTrans->execute(NoCommit) != 0) + if(myTrans->execute(NdbTransaction::NoCommit) != 0) { err = myTrans->getNdbError(); if(err.status == NdbError::TemporaryError){ @@ -501,7 +501,7 @@ int scan_update(Ndb* myNdb, */ if(check != -1) { - check = 
myTrans->execute(NoCommit); + check = myTrans->execute(NdbTransaction::NoCommit); } /** @@ -525,7 +525,7 @@ int scan_update(Ndb* myNdb, /** * Commit all prepared operations */ - if(myTrans->execute(Commit) == -1) + if(myTrans->execute(NdbTransaction::Commit) == -1) { if(err.status == NdbError::TemporaryError){ std::cout << myTrans->getNdbError().message << std::endl; @@ -640,7 +640,7 @@ int scan_print(Ndb * myNdb) /** * Start scan (NoCommit since we are only reading at this stage); */ - if(myTrans->execute(NoCommit) != 0){ + if(myTrans->execute(NdbTransaction::NoCommit) != 0){ err = myTrans->getNdbError(); if(err.status == NdbError::TemporaryError){ std::cout << myTrans->getNdbError().message << std::endl; diff --git a/ndb/examples/ndbapi_simple_example/ndbapi_simple.cpp b/ndb/examples/ndbapi_simple_example/ndbapi_simple.cpp index f97032f66ed..82b7abc16eb 100644 --- a/ndb/examples/ndbapi_simple_example/ndbapi_simple.cpp +++ b/ndb/examples/ndbapi_simple_example/ndbapi_simple.cpp @@ -72,7 +72,7 @@ int main() } // Optionally connect and wait for the storage nodes (ndbd's) - if (cluster_connection.wait_until_ready(30,30)) + if (cluster_connection.wait_until_ready(30,0) < 0) { std::cout << "Cluster was not ready within 30 secs.\n"; exit(-1); @@ -92,7 +92,6 @@ int main() run_application(mysql, cluster_connection); } - // ndb_end should not be called until all "Ndb" objects are deleted ndb_end(0); std::cout << "\nTo drop created table use:\n" @@ -170,7 +169,7 @@ static void do_insert(Ndb &myNdb) myOperation->equal("ATTR1", i+5); myOperation->setValue("ATTR2", i+5); - if (myTransaction->execute( Commit ) == -1) + if (myTransaction->execute( NdbTransaction::Commit ) == -1) APIERROR(myTransaction->getNdbError()); myNdb.closeTransaction(myTransaction); @@ -193,7 +192,7 @@ static void do_update(Ndb &myNdb) myOperation->equal( "ATTR1", i ); myOperation->setValue( "ATTR2", i+10); - if( myTransaction->execute( Commit ) == -1 ) + if( myTransaction->execute( NdbTransaction::Commit 
) == -1 ) APIERROR(myTransaction->getNdbError()); myNdb.closeTransaction(myTransaction); @@ -214,7 +213,7 @@ static void do_delete(Ndb &myNdb) myOperation->deleteTuple(); myOperation->equal( "ATTR1", 3 ); - if (myTransaction->execute(Commit) == -1) + if (myTransaction->execute(NdbTransaction::Commit) == -1) APIERROR(myTransaction->getNdbError()); myNdb.closeTransaction(myTransaction); @@ -240,7 +239,7 @@ static void do_read(Ndb &myNdb) NdbRecAttr *myRecAttr= myOperation->getValue("ATTR2", NULL); if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError()); - if(myTransaction->execute( Commit ) == -1) + if(myTransaction->execute( NdbTransaction::Commit ) == -1) if (i == 3) { std::cout << "Detected that deleted tuple doesn't exist!" << std::endl; } else { diff --git a/ndb/examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp b/ndb/examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp index 39574fae208..7d70dc723f8 100644 --- a/ndb/examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp +++ b/ndb/examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp @@ -127,7 +127,7 @@ int main() myOperation->equal("ATTR1", i+5); myOperation->setValue("ATTR2", i+5); - if (myTransaction->execute( Commit ) == -1) + if (myTransaction->execute( NdbTransaction::Commit ) == -1) APIERROR(myTransaction->getNdbError()); myNdb->closeTransaction(myTransaction); @@ -152,7 +152,7 @@ int main() NdbRecAttr *myRecAttr= myIndexOperation->getValue("ATTR1", NULL); if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError()); - if(myTransaction->execute( Commit ) != -1) + if(myTransaction->execute( NdbTransaction::Commit ) != -1) printf(" %2d %2d\n", myRecAttr->u_32_value(), i); myNdb->closeTransaction(myTransaction); @@ -173,7 +173,7 @@ int main() myIndexOperation->equal( "ATTR2", i ); myIndexOperation->setValue( "ATTR2", i+10); - if( myTransaction->execute( Commit ) == -1 ) + if( myTransaction->execute( NdbTransaction::Commit ) == -1 ) APIERROR(myTransaction->getNdbError()); 
myNdb->closeTransaction(myTransaction); @@ -193,7 +193,7 @@ int main() myIndexOperation->deleteTuple(); myIndexOperation->equal( "ATTR2", 3 ); - if (myTransaction->execute(Commit) == -1) + if (myTransaction->execute(NdbTransaction::Commit) == -1) APIERROR(myTransaction->getNdbError()); myNdb->closeTransaction(myTransaction); @@ -218,7 +218,7 @@ int main() NdbRecAttr *myRecAttr= myOperation->getValue("ATTR2", NULL); if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError()); - if(myTransaction->execute( Commit ) == -1) + if(myTransaction->execute( NdbTransaction::Commit ) == -1) if (i == 3) { std::cout << "Detected that deleted tuple doesn't exist!\n"; } else { diff --git a/ndb/include/Makefile.am b/ndb/include/Makefile.am index aa03ef37e3c..1b298a09e26 100644 --- a/ndb/include/Makefile.am +++ b/ndb/include/Makefile.am @@ -44,3 +44,6 @@ dist-hook: -rm -rf `find $(distdir) -type d -name SCCS` windoze-dsp: + +# Don't update the files from bitkeeper +%::SCCS/s.% diff --git a/ndb/include/kernel/signaldata/CreateEvnt.hpp b/ndb/include/kernel/signaldata/CreateEvnt.hpp index e911fa36ce6..72dab96f8b6 100644 --- a/ndb/include/kernel/signaldata/CreateEvnt.hpp +++ b/ndb/include/kernel/signaldata/CreateEvnt.hpp @@ -17,6 +17,7 @@ #ifndef CREATE_EVNT_HPP #define CREATE_EVNT_HPP +#include #include "SignalData.hpp" #include #include @@ -101,7 +102,7 @@ public: Busy = 701, NotMaster = 702, SeizeError = 703, - EventNotFound = 4238, + EventNotFound = 4710, EventNameTooLong = 4241, TooManyEvents = 4242, BadRequestType = 4247, @@ -363,11 +364,10 @@ struct CreateEvntRef { Busy = 701, NotMaster = 702, SeizeError = 703, - EventNotFound = 4238, - EventExists = 4239, - EventNameTooLong = 4241, - TooManyEvents = 4242, - // EventExists = 4244, + TooManyEvents = 4707, + EventNameTooLong = 4708, + EventNameExists = 746, + EventNotFound = 4731, AttributeNotStored = 4245, AttributeNullable = 4246, BadRequestType = 4247, @@ -376,7 +376,7 @@ struct CreateEvntRef { InvalidEventType = 4250, 
NotUnique = 4251, AllocationError = 4252, - CreateEventTableFailed = 4253, + CreateEventTableFailed = 4711, InvalidAttributeOrder = 4255, Temporary = 0x1 << 16 }; diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp index 616da05b3ae..cc8a647615c 100644 --- a/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ b/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -269,7 +269,9 @@ public: ExtTimespec = NdbSqlUtil::Type::Timespec, ExtBlob = NdbSqlUtil::Type::Blob, ExtText = NdbSqlUtil::Type::Text, - ExtBit = NdbSqlUtil::Type::Bit + ExtBit = NdbSqlUtil::Type::Bit, + ExtLongvarchar = NdbSqlUtil::Type::Longvarchar, + ExtLongvarbinary = NdbSqlUtil::Type::Longvarbinary }; // Attribute data interpretation @@ -297,98 +299,91 @@ public: return ((1 << AttributeSize) * AttributeArraySize + 31) >> 5; } - // translate to old kernel types and sizes + // compute old-sty|e attribute size and array size inline bool translateExtType() { - AttributeType = ~0; // deprecated switch (AttributeExtType) { case DictTabInfo::ExtUndefined: - break; + return false; case DictTabInfo::ExtTinyint: - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = AttributeExtLength; - return true; case DictTabInfo::ExtTinyunsigned: AttributeSize = DictTabInfo::an8Bit; AttributeArraySize = AttributeExtLength; - return true; + break; case DictTabInfo::ExtSmallint: - AttributeSize = DictTabInfo::a16Bit; - AttributeArraySize = AttributeExtLength; - return true; case DictTabInfo::ExtSmallunsigned: AttributeSize = DictTabInfo::a16Bit; AttributeArraySize = AttributeExtLength; - return true; + break; case DictTabInfo::ExtMediumint: - AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = 3 * AttributeExtLength; - return true; case DictTabInfo::ExtMediumunsigned: AttributeSize = DictTabInfo::an8Bit; AttributeArraySize = 3 * AttributeExtLength; - return true; + break; case DictTabInfo::ExtInt: - AttributeSize = DictTabInfo::a32Bit; - AttributeArraySize = 
AttributeExtLength; - return true; case DictTabInfo::ExtUnsigned: AttributeSize = DictTabInfo::a32Bit; AttributeArraySize = AttributeExtLength; - return true; + break; case DictTabInfo::ExtBigint: - AttributeSize = DictTabInfo::a64Bit; - AttributeArraySize = AttributeExtLength; - return true; case DictTabInfo::ExtBigunsigned: AttributeSize = DictTabInfo::a64Bit; AttributeArraySize = AttributeExtLength; - return true; + break; case DictTabInfo::ExtFloat: AttributeSize = DictTabInfo::a32Bit; AttributeArraySize = AttributeExtLength; - return true; + break; case DictTabInfo::ExtDouble: AttributeSize = DictTabInfo::a64Bit; AttributeArraySize = AttributeExtLength; - return true; + break; case DictTabInfo::ExtDecimal: // not yet implemented anywhere - break; + return false; case DictTabInfo::ExtChar: case DictTabInfo::ExtBinary: AttributeSize = DictTabInfo::an8Bit; AttributeArraySize = AttributeExtLength; - return true; + break; case DictTabInfo::ExtVarchar: case DictTabInfo::ExtVarbinary: - // to fix + if (AttributeExtLength > 0xff) + return false; AttributeSize = DictTabInfo::an8Bit; - AttributeArraySize = AttributeExtLength + 2; - return true; + AttributeArraySize = AttributeExtLength + 1; + break; case DictTabInfo::ExtDatetime: // to fix AttributeSize = DictTabInfo::an8Bit; AttributeArraySize = 8 * AttributeExtLength; - return true; + break; case DictTabInfo::ExtTimespec: // to fix AttributeSize = DictTabInfo::an8Bit; AttributeArraySize = 12 * AttributeExtLength; - return true; + break; case DictTabInfo::ExtBlob: case DictTabInfo::ExtText: AttributeSize = DictTabInfo::an8Bit; - // head + inline part [ attr precision lower half ] + // head + inline part (length in precision lower half) AttributeArraySize = (NDB_BLOB_HEAD_SIZE << 2) + (AttributeExtPrecision & 0xFFFF); - return true; + break; case DictTabInfo::ExtBit: AttributeSize = DictTabInfo::aBit; AttributeArraySize = AttributeExtLength; - return true; + break; + case DictTabInfo::ExtLongvarchar: + case 
DictTabInfo::ExtLongvarbinary: + if (AttributeExtLength > 0xffff) + return false; + AttributeSize = DictTabInfo::an8Bit; + AttributeArraySize = AttributeExtLength + 2; + break; + default: + return false; }; - - return false; + return true; } inline void print(FILE *out) { diff --git a/ndb/include/ndb_constants.h b/ndb/include/ndb_constants.h index 04d86e267f7..40a3d963955 100644 --- a/ndb/include/ndb_constants.h +++ b/ndb/include/ndb_constants.h @@ -21,7 +21,7 @@ * Changing the values makes database upgrade impossible. * * New or removed definitions must be replicated to - * NdbDictionary.hpp and NdbSqlUtil.cpp. + * NdbDictionary.hpp and NdbSqlUtil.hpp. * * Not for use by application programs. * Use the enums provided by NdbDictionary instead. @@ -58,7 +58,9 @@ #define NDB_TYPE_BLOB 20 #define NDB_TYPE_TEXT 21 #define NDB_TYPE_BIT 22 +#define NDB_TYPE_LONG_VARCHAR 23 +#define NDB_TYPE_LONG_VARBINARY 24 -#define NDB_TYPE_MAX 23 +#define NDB_TYPE_MAX 25 #endif diff --git a/ndb/include/ndbapi/NdbBlob.hpp b/ndb/include/ndbapi/NdbBlob.hpp index c02e10b7b76..f9090cb91ba 100644 --- a/ndb/include/ndbapi/NdbBlob.hpp +++ b/ndb/include/ndbapi/NdbBlob.hpp @@ -309,8 +309,8 @@ private: int invokeActiveHook(); // blob handle maintenance int atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl* aColumn); - int preExecute(ExecType anExecType, bool& batch); - int postExecute(ExecType anExecType); + int preExecute(NdbTransaction::ExecType anExecType, bool& batch); + int postExecute(NdbTransaction::ExecType anExecType); int preCommit(); int atNextResult(); // errors diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index f25b3a7fbfa..553d85f4129 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -149,14 +149,20 @@ public: /** * @class Column - * @brief Represents an column in an NDB Cluster table + * @brief Represents a column in an NDB Cluster table * - * Each column has a type. 
The type of a column is determind by a number + * Each column has a type. The type of a column is determined by a number * of type specifiers. * The type specifiers are: * - Builtin type * - Array length or max length - * - Precision and scale + * - Precision and scale (not used yet) + * - Character set for string types + * - Inline and part sizes for blobs + * + * Types in general correspond to MySQL types and their variants. + * Data formats are same as in MySQL. NDB API provides no support for + * constructing such formats. NDB kernel checks them however. */ class Column { public: @@ -179,14 +185,16 @@ public: Double = NDB_TYPE_DOUBLE, ///< 64-bit float. 8 byte float, can be used in array Decimal = NDB_TYPE_DECIMAL, ///< Precision, Scale are applicable Char = NDB_TYPE_CHAR, ///< Len. A fixed array of 1-byte chars - Varchar = NDB_TYPE_VARCHAR, ///< Max len + Varchar = NDB_TYPE_VARCHAR, ///< Length bytes: 1, Max: 255 Binary = NDB_TYPE_BINARY, ///< Len - Varbinary = NDB_TYPE_VARBINARY, ///< Max len + Varbinary = NDB_TYPE_VARBINARY, ///< Length bytes: 1, Max: 255 Datetime = NDB_TYPE_DATETIME, ///< Precision down to 1 sec (sizeof(Datetime) == 8 bytes ) Timespec = NDB_TYPE_TIMESPEC, ///< Precision down to 1 nsec(sizeof(Datetime) == 12 bytes ) Blob = NDB_TYPE_BLOB, ///< Binary large object (see NdbBlob) - Text = NDB_TYPE_TEXT, ///< Text blob, - Bit = NDB_TYPE_BIT ///< Bit, length specifies no of bits + Text = NDB_TYPE_TEXT, ///< Text blob + Bit = NDB_TYPE_BIT, ///< Bit, length specifies no of bits + Longvarchar = NDB_TYPE_LONG_VARCHAR, ///< Length bytes: 2, little-endian + Longvarbinary = NDB_TYPE_LONG_VARBINARY ///< Length bytes: 2, little-endian }; /** @@ -698,7 +706,7 @@ public: /** @} *******************************************************************/ -#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL void setStoredTable(bool x) { setLogging(x); } bool getStoredTable() const { return getLogging(); } @@ -901,34 +909,120 @@ public: */ 
class Event : public Object { public: - enum TableEvent { TE_INSERT=1, TE_DELETE=2, TE_UPDATE=4, TE_ALL=7 }; + enum TableEvent { + TE_INSERT=1, ///< Insert event on table + TE_DELETE=2, ///< Delete event on table + TE_UPDATE=4, ///< Update event on table + TE_ALL=7 ///< Any/all event on table (not relevant when + ///< events are received) + }; enum EventDurability { - ED_UNDEFINED = 0, + ED_UNDEFINED +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + = 0 +#endif #if 0 // not supported - ED_SESSION = 1, + ,ED_SESSION = 1, // Only this API can use it // and it's deleted after api has disconnected or ndb has restarted - ED_TEMPORARY = 2, + ED_TEMPORARY = 2 // All API's can use it, // But's its removed when ndb is restarted -#endif - ED_PERMANENT = 3 - // All API's can use it, - // It's still defined after a restart +#endif + ,ED_PERMANENT ///< All API's can use it, + ///< It's still defined after a restart +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + = 3 +#endif }; - + + /* + * Constructor + * @param name Name of event + */ Event(const char *name); + /* + * Constructor + * @param name Name of event + * @param table Reference retrieved from NdbDictionary + */ + Event(const char *name, const NdbDictionary::Table& table); virtual ~Event(); - void setName(const char *); - void setTable(const char *); - void addTableEvent(const TableEvent); - void setDurability(const EventDurability); + /** + * Set/get unique identifier for the event + */ + void setName(const char *name); + const char *getName() const; + /** + * Define table on which events should be detected + * + * @note calling this method will default to detection + * of events on all columns. Calling subsequent + * addEventColumn calls will override this. 
+ * + * @param table reference retrieved from NdbDictionary + */ + void setTable(const NdbDictionary::Table& table); + /** + * Set table for which events should be detected + * + * @note preferred way is using setTable(const NdbDictionary::Table) + * or constructor with table object parameter + */ + void setTable(const char *tableName); + /** + * Get table name for events + * + * @return table name + */ + const char* getTableName() const; + /** + * Add type of event that should be detected + */ + void addTableEvent(const TableEvent te); + /** + * Get/set durability of the event + */ + void setDurability(EventDurability ed); + EventDurability getDurability() const; +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL void addColumn(const Column &c); +#endif + /** + * Add a column on which events should be detected + * + * @param attrId Column id + * + * @note errors will mot be detected until createEvent() is called + */ void addEventColumn(unsigned attrId); + /** + * Add a column on which events should be detected + * + * @param columnName Column name + * + * @note errors will not be detected until createEvent() is called + */ void addEventColumn(const char * columnName); + /** + * Add several columns on which events should be detected + * + * @param n Number of columns + * @param columnNames Column names + * + * @note errors will mot be detected until + * NdbDictionary::Dictionary::createEvent() is called + */ void addEventColumns(int n, const char ** columnNames); + /** + * Get no of columns defined in an Event + * + * @return Number of columns, -1 on error + */ + int getNoOfEventColumns() const; + /** * Get object status */ @@ -939,7 +1033,9 @@ public: */ virtual int getObjectVersion() const; +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL void print(); +#endif private: #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL @@ -1010,6 +1106,8 @@ public: * Fetch list of all objects, optionally restricted to given type. 
*/ int listObjects(List & list, Object::Type type = Object::TypeUndefined); + int listObjects(List & list, + Object::Type type = Object::TypeUndefined) const; /** * Get the latest error @@ -1048,6 +1146,7 @@ public: * @return 0 if successful, otherwise -1 */ int listIndexes(List & list, const char * tableName); + int listIndexes(List & list, const char * tableName) const; /** @} *******************************************************************/ /** diff --git a/ndb/include/ndbapi/NdbError.hpp b/ndb/include/ndbapi/NdbError.hpp index 45dbd5d3995..698b3c10d57 100644 --- a/ndb/include/ndbapi/NdbError.hpp +++ b/ndb/include/ndbapi/NdbError.hpp @@ -168,7 +168,12 @@ struct NdbError { /** * Node shutdown */ - NodeShutdown = ndberror_cl_node_shutdown + NodeShutdown = ndberror_cl_node_shutdown, + + /** + * Schema object already exists + */ + SchemaObjectExists = ndberror_cl_schema_object_already_exists }; /** diff --git a/ndb/include/ndbapi/NdbEventOperation.hpp b/ndb/include/ndbapi/NdbEventOperation.hpp index 8c2de36fc51..a0004558b39 100644 --- a/ndb/include/ndbapi/NdbEventOperation.hpp +++ b/ndb/include/ndbapi/NdbEventOperation.hpp @@ -14,19 +14,6 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/***************************************************************************** - * Name: NdbEventOperation.hpp - * Include: - * Link: - * Author: Tomas Ulin MySQL AB - * Date: 2003-11-21 - * Version: 0.1 - * Description: Event support - * Documentation: - * Adjust: 2003-11-21 Tomas Ulin First version. - * Adjust: 2003-12-11 Tomas Ulin Alpha Release. - ****************************************************************************/ - #ifndef NdbEventOperation_H #define NdbEventOperation_H @@ -37,73 +24,78 @@ class NdbEventOperationImpl; * @class NdbEventOperation * @brief Class of operations for getting change events from database. 
* - * An NdbEventOperation object is instantiated by - * Ndb::createEventOperation + * Brief description on how to work with events: * - * Prior to that an event must have been created in the Database through - * NdbDictionary::createEvent - * - * The instance is removed by Ndb::dropEventOperation + * - An event i created in the Database through + * NdbDictionary::Dictionary::createEvent() (note that this can be done + * by any application or thread and not necessarily by the "listener") + * - To listen to events, an NdbEventOperation object is instantiated by + * Ndb::createEventOperation() + * - execute() starts the event flow. Use Ndb::pollEvents() to wait + * for an event to occur. Use next() to iterate + * through the events that have occured. + * - The instance is removed by Ndb::dropEventOperation() * * For more info see: * @ref ndbapi_event.cpp * * Known limitations: * - * Maximum number of active NdbEventOperations are now set at compile time. + * - Maximum number of active NdbEventOperations are now set at compile time. * Today 100. This will become a configuration parameter later. - * - * Maximum number of NdbEventOperations tied to same event are maximum 16 + * - Maximum number of NdbEventOperations tied to same event are maximum 16 * per process. * * Known issues: * - * When several NdbEventOperation's are tied to the same event in the same + * - When several NdbEventOperation's are tied to the same event in the same * process they will share the circular buffer. The BufferLength will then * be the same for all and decided by the first NdbEventOperation * instantiation. Just make sure to instantiate the "largest" one first. - * - * Today all events INSERT/DELETE/UPDATE and all changed attributes are + * - Today all events INSERT/DELETE/UPDATE and all changed attributes are * sent to the API, even if only specific attributes have been specified. * These are however hidden from the user and only relevant data is shown * after next(). 
- * However false exits from Ndb::pollEvents() may occur and thus + * - "False" exits from Ndb::pollEvents() may occur and thus * the subsequent next() will return zero, * since there was no available data. Just do Ndb::pollEvents() again. - * - * Event code does not check table schema version. Make sure to drop events + * - Event code does not check table schema version. Make sure to drop events * after table is dropped. Will be fixed in later * versions. - * - * If a node failure has occured not all events will be recieved + * - If a node failure has occured not all events will be recieved * anymore. Drop NdbEventOperation and Create again after nodes are up * again. Will be fixed in later versions. * * Test status: - * Tests have been run on 1-node and 2-node systems * - * Known bugs: - * - * None, except if we can call some of the "issues" above bugs + * - Tests have been run on 1-node and 2-node systems * * Useful API programs: * - * ndb_select_all -d sys 'NDB$EVENTS_0' - * Will show contents in the system table containing created events. + * - ndb_select_all -d sys 'NDB$EVENTS_0' + * shows contents in the system table containing created events. * + * @note this is an inteface to viewing events that is subject to change */ class NdbEventOperation { public: + /** + * State of the NdbEventOperation object + */ + enum State { + EO_CREATED, ///< Created but execute() not called + EO_EXECUTING, ///< execute() called + EO_ERROR ///< An error has occurred. Object unusable. + }; /** * Retrieve current state of the NdbEventOperation object */ - enum State {CREATED,EXECUTING,ERROR}; State getState(); /** * Activates the NdbEventOperation to start receiving events. The * changed attribute values may be retrieved after next() has returned - * a value greater than zero. The getValue() methods below must be called + * a value greater than zero. The getValue() methods must be called * prior to execute(). * * @return 0 if successful otherwise -1. 
@@ -125,21 +117,21 @@ public: * aligned appropriately. The buffer is used directly * (avoiding a copy penalty) only if it is aligned on a * 4-byte boundary and the attribute size in bytes - * (i.e. NdbRecAttr::attrSize times NdbRecAttr::arraySize is + * (i.e. NdbRecAttr::attrSize() times NdbRecAttr::arraySize() is * a multiple of 4). * - * @note There are two versions, NdbOperation::getValue and - * NdbOperation::getPreValue for retrieving the current and + * @note There are two versions, getValue() and + * getPreValue() for retrieving the current and * previous value repectively. * * @note This method does not fetch the attribute value from * the database! The NdbRecAttr object returned by this method * is not readable/printable before the - * NdbEventConnection::execute has been made and - * NdbEventConnection::next has returned a value greater than + * execute() has been made and + * next() has returned a value greater than * zero. If a specific attribute has not changed the corresponding * NdbRecAttr will be in state UNDEFINED. This is checked by - * NdbRecAttr::isNull which then returns -1. + * NdbRecAttr::isNull() which then returns -1. * * @param anAttrName Attribute name * @param aValue If this is non-NULL, then the attribute value @@ -156,11 +148,11 @@ public: /** * Retrieves event resultset if available, inserted into the NdbRecAttrs * specified in getValue() and getPreValue(). To avoid polling for - * a resultset, one can use Ndb::pollEvents + * a resultset, one can use Ndb::pollEvents() * which will wait on a mutex until an event occurs or the specified * timeout occurs. * - * @return >=0 if successful otherwise -1. Return value inicates number + * @return >=0 if successful otherwise -1. Return value indicates number * of available events. By sending pOverRun one may query for buffer * overflow and *pOverRun will indicate the number of events that have * overwritten. 
@@ -199,6 +191,13 @@ public: */ Uint32 getLatestGCI(); + /** + * Get the latest error + * + * @return Error object. + */ + const struct NdbError & getNdbError() const; + #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL /* * diff --git a/ndb/include/ndbapi/NdbOperation.hpp b/ndb/include/ndbapi/NdbOperation.hpp index 58ecb57ded3..06715233726 100644 --- a/ndb/include/ndbapi/NdbOperation.hpp +++ b/ndb/include/ndbapi/NdbOperation.hpp @@ -59,10 +59,17 @@ public: */ enum LockMode { - LM_Read = 0, - LM_Exclusive = 1, - LM_CommittedRead = 2, + LM_Read ///< Read with shared lock #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + = 0 +#endif + ,LM_Exclusive ///< Read with exclusive lock +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + = 1 +#endif + ,LM_CommittedRead ///< Ignore locks, read last committed value +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + = 2, LM_Dirty = 2 #endif }; diff --git a/ndb/include/ndbapi/NdbTransaction.hpp b/ndb/include/ndbapi/NdbTransaction.hpp index 56d87d4b6c0..78724206b4f 100644 --- a/ndb/include/ndbapi/NdbTransaction.hpp +++ b/ndb/include/ndbapi/NdbTransaction.hpp @@ -42,34 +42,22 @@ class NdbBlob; typedef void (* NdbAsynchCallback)(int, NdbTransaction*, void*); #endif -/** - * Commit type of transaction - */ -enum AbortOption { #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - CommitIfFailFree = 0, - CommitAsMuchAsPossible = 2, ///< Commit transaction with as many - TryCommit = 0, ///< Missing explanation -#endif - AbortOnError = 0, ///< Abort transaction on failed operation - AO_IgnoreError = 2 ///< Transaction continues on failed operation +enum AbortOption { + CommitIfFailFree= 0, + TryCommit= 0, + AbortOnError= 0, + CommitAsMuchAsPossible= 2, + AO_IgnoreError= 2 }; - -typedef AbortOption CommitType; - - -/** - * Execution type of transaction - */ enum ExecType { - NoExecTypeDef = -1, ///< Erroneous type (Used for debugging only) - Prepare, ///< Missing explanation - NoCommit, ///< Execute the transaction as far as it has - ///< been defined, but do not yet commit it - Commit, ///< Execute 
and try to commit the transaction - Rollback ///< Rollback transaction + NoExecTypeDef = -1, + Prepare, + NoCommit, + Commit, + Rollback }; - +#endif /** * @class NdbTransaction @@ -99,17 +87,17 @@ enum ExecType { * before calling execute(). * * A call to execute() uses one out of three types of execution: - * -# ExecType::NoCommit Executes operations without committing them. - * -# ExecType::Commit Executes remaining operation and commits the + * -# NdbTransaction::NoCommit Executes operations without committing them. + * -# NdbTransaction::Commit Executes remaining operation and commits the * complete transaction - * -# ExecType::Rollback Rollbacks the entire transaction. + * -# NdbTransaction::Rollback Rollbacks the entire transaction. * * execute() is equipped with an extra error handling parameter. * There are two alternatives: - * -# AbortOption::AbortOnError (default). + * -# NdbTransaction::AbortOnError (default). * The transaction is aborted if there are any error during the * execution - * -# AbortOption::IgnoreError + * -# NdbTransaction::AO_IgnoreError * Continue execution of transaction even if operation fails * */ @@ -141,6 +129,7 @@ enum ExecType { * primary key since it came along from the scanned tuple. 
* */ + class NdbTransaction { #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL @@ -154,6 +143,44 @@ class NdbTransaction public: + /** + * Commit type of transaction + */ + enum AbortOption { + AbortOnError= ///< Abort transaction on failed operation +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + ::AbortOnError +#endif + ,AO_IgnoreError= ///< Transaction continues on failed operation +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + ::AO_IgnoreError +#endif + }; + + /** + * Execution type of transaction + */ + enum ExecType { +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + NoExecTypeDef= + ::NoExecTypeDef, ///< Erroneous type (Used for debugging only) + Prepare= ::Prepare, ///< Missing explanation +#endif + NoCommit= ///< Execute the transaction as far as it has + ///< been defined, but do not yet commit it +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + ::NoCommit +#endif + ,Commit= ///< Execute and try to commit the transaction +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + ::Commit +#endif + ,Rollback ///< Rollback transaction +#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL + = ::Rollback +#endif + }; + /** * Get an NdbOperation for a table. * Note that the operation has to be defined before it is executed. @@ -281,9 +308,15 @@ public: * the send. * @return 0 if successful otherwise -1. 
*/ - int execute(ExecType execType, + int execute(ExecType execType, AbortOption abortOption = AbortOnError, int force = 0 ); +#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED + int execute(::ExecType execType, + ::AbortOption abortOption = ::AbortOnError, + int force = 0 ) + { return execute ((ExecType)execType,(AbortOption)abortOption,force); } +#endif #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL // to be documented later @@ -314,6 +347,14 @@ public: NdbAsynchCallback callback, void* anyObject, AbortOption abortOption = AbortOnError); +#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED + void executeAsynchPrepare(::ExecType execType, + NdbAsynchCallback callback, + void* anyObject, + ::AbortOption abortOption = ::AbortOnError) + { executeAsynchPrepare((ExecType)execType, callback, anyObject, + (AbortOption)abortOption); } +#endif /** * Prepare and send an asynchronous transaction. @@ -332,6 +373,14 @@ public: NdbAsynchCallback aCallback, void* anyObject, AbortOption abortOption = AbortOnError); +#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED + void executeAsynch(::ExecType aTypeOfExec, + NdbAsynchCallback aCallback, + void* anyObject, + ::AbortOption abortOption= ::AbortOnError) + { executeAsynch((ExecType)aTypeOfExec, aCallback, anyObject, + (AbortOption)abortOption); } +#endif #endif /** * Refresh diff --git a/ndb/include/ndbapi/ndberror.h b/ndb/include/ndbapi/ndberror.h index ceb1881a4cc..2c5a41405db 100644 --- a/ndb/include/ndbapi/ndberror.h +++ b/ndb/include/ndbapi/ndberror.h @@ -47,7 +47,8 @@ typedef enum ndberror_cl_function_not_implemented = 13, ndberror_cl_unknown_error_code = 14, ndberror_cl_node_shutdown = 15, - ndberror_cl_configuration = 16 + ndberror_cl_configuration = 16, + ndberror_cl_schema_object_already_exists = 17 } ndberror_classification_enum; diff --git a/ndb/include/util/NdbSqlUtil.hpp b/ndb/include/util/NdbSqlUtil.hpp index 8ea3e9c8124..feb2b97c54b 100644 --- a/ndb/include/util/NdbSqlUtil.hpp +++ b/ndb/include/util/NdbSqlUtil.hpp @@ -20,6 +20,9 @@ #include #include +struct 
charset_info_st; +typedef struct charset_info_st CHARSET_INFO; + class NdbSqlUtil { public: /** @@ -86,9 +89,11 @@ public: Timespec = NDB_TYPE_TIMESPEC, Blob = NDB_TYPE_BLOB, Text = NDB_TYPE_TEXT, - Bit = NDB_TYPE_BIT + Bit = NDB_TYPE_BIT, + Longvarchar = NDB_TYPE_LONG_VARCHAR, + Longvarbinary = NDB_TYPE_LONG_VARBINARY }; - Enum m_typeId; + Enum m_typeId; // redundant Cmp* m_cmp; // comparison method }; @@ -98,16 +103,29 @@ public: static const Type& getType(Uint32 typeId); /** - * Get type by id but replace char type by corresponding binary type. + * Get the normalized type used in hashing and key comparisons. + * Maps all string types to Binary. */ static const Type& getTypeBinary(Uint32 typeId); /** * Check character set. */ - static bool usable_in_pk(Uint32 typeId, const void* cs); - static bool usable_in_hash_index(Uint32 typeId, const void* cs); - static bool usable_in_ordered_index(Uint32 typeId, const void* cs); + static bool usable_in_pk(Uint32 typeId, const void* info); + static bool usable_in_hash_index(Uint32 typeId, const void* info); + static bool usable_in_ordered_index(Uint32 typeId, const void* info); + + /** + * Get number of length bytes and length from variable length string. + * Returns false on error (invalid data). For other types returns + * zero length bytes and the fixed attribute length. + */ + static bool get_var_length(Uint32 typeId, const void* p, unsigned attrlen, Uint32& lb, Uint32& len); + + /** + * Temporary workaround for bug#7284. 
+ */ + static int strnxfrm_bug7284(CHARSET_INFO* cs, unsigned char* dst, unsigned dstLen, const unsigned char*src, unsigned srcLen); private: /** @@ -138,6 +156,9 @@ private: static Cmp cmpTimespec; static Cmp cmpBlob; static Cmp cmpText; + static Cmp cmpBit; + static Cmp cmpLongvarchar; + static Cmp cmpLongvarbinary; }; #endif diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp index 9ed791d2803..fd23781605c 100644 --- a/ndb/src/common/util/NdbSqlUtil.cpp +++ b/ndb/src/common/util/NdbSqlUtil.cpp @@ -71,7 +71,7 @@ NdbSqlUtil::char_like(const char* s1, unsigned n1, } /* - * Data types. + * Data types. The entries must be in the numerical order. */ const NdbSqlUtil::Type @@ -138,7 +138,7 @@ NdbSqlUtil::m_typeList[] = { }, { Type::Varchar, - NULL // cmpVarchar + cmpVarchar }, { Type::Binary, @@ -146,7 +146,7 @@ NdbSqlUtil::m_typeList[] = { }, { Type::Varbinary, - NULL // cmpVarbinary + cmpVarbinary }, { Type::Datetime, @@ -163,6 +163,18 @@ NdbSqlUtil::m_typeList[] = { { Type::Text, NULL // cmpText + }, + { + Type::Bit, + NULL // cmpBit + }, + { + Type::Longvarchar, + cmpLongvarchar + }, + { + Type::Longvarbinary, + cmpLongvarbinary } }; @@ -181,10 +193,12 @@ NdbSqlUtil::getTypeBinary(Uint32 typeId) { switch (typeId) { case Type::Char: - typeId = Type::Binary; - break; case Type::Varchar: - typeId = Type::Varbinary; + case Type::Binary: + case Type::Varbinary: + case Type::Longvarchar: + case Type::Longvarbinary: + typeId = Type::Binary; break; case Type::Text: typeId = Type::Blob; @@ -425,11 +439,27 @@ NdbSqlUtil::cmpChar(const void* info, const void* p1, unsigned n1, const void* p return k < 0 ? -1 : k > 0 ? 
+1 : 0; } -// waiting for MySQL and new NDB implementation int NdbSqlUtil::cmpVarchar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) { - assert(false); + const unsigned lb = 1; + // collation does not work on prefix for some charsets + assert(full && n1 >= lb && n2 >= lb); + const uchar* v1 = (const uchar*)p1; + const uchar* v2 = (const uchar*)p2; + unsigned m1 = *v1; + unsigned m2 = *v2; + if (m1 <= n1 - lb && m2 <= n2 - lb) { + CHARSET_INFO* cs = (CHARSET_INFO*)(info); + // compare with space padding + int k = (*cs->coll->strnncollsp)(cs, v1 + lb, m1, v2 + lb, m2, false); + return k < 0 ? -1 : k > 0 ? +1 : 0; + } + // treat bad data as NULL + if (m1 > n1 - lb && m2 <= n2 - lb) + return -1; + if (m1 <= n1 - lb && m2 > n2 - lb) + return +1; return 0; } @@ -442,20 +472,39 @@ NdbSqlUtil::cmpBinary(const void* info, const void* p1, unsigned n1, const void* unsigned n = (n1 <= n2 ? n1 : n2); int k = memcmp(v1, v2, n); if (k == 0) { - if (full) - k = (int)n1 - (int)n2; - else - k = (int)n - (int)n2; + k = (full ? n1 : n) - n2; } return k < 0 ? -1 : k > 0 ? +1 : full ? 0 : CmpUnknown; } -// waiting for MySQL and new NDB implementation int NdbSqlUtil::cmpVarbinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) { - assert(false); - return 0; + const unsigned lb = 1; + if (n2 >= lb) { + assert(n1 >= lb); + const uchar* v1 = (const uchar*)p1; + const uchar* v2 = (const uchar*)p2; + unsigned m1 = *v1; + unsigned m2 = *v2; + if (m1 <= n1 - lb && m2 <= n2 - lb) { + // compare as binary strings + unsigned m = (m1 <= m2 ? m1 : m2); + int k = memcmp(v1 + lb, v2 + lb, m); + if (k == 0) { + k = (full ? m1 : m) - m2; + } + return k < 0 ? -1 : k > 0 ? +1 : full ? 0 : CmpUnknown; + } + // treat bad data as NULL + if (m1 > n1 - lb && m2 <= n2 - lb) + return -1; + if (m1 <= n1 - lb && m2 > n2 - lb) + return +1; + return 0; + } + assert(! 
full); + return CmpUnknown; } // allowed but ordering is wrong before wl-1442 done @@ -489,6 +538,68 @@ NdbSqlUtil::cmpText(const void* info, const void* p1, unsigned n1, const void* p return 0; } +// not yet +int +NdbSqlUtil::cmpBit(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) +{ + assert(false); + return 0; +} + +int +NdbSqlUtil::cmpLongvarchar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) +{ + const unsigned lb = 2; + // collation does not work on prefix for some charsets + assert(full && n1 >= lb && n2 >= lb); + const uchar* v1 = (const uchar*)p1; + const uchar* v2 = (const uchar*)p2; + unsigned m1 = uint2korr(v1); + unsigned m2 = uint2korr(v2); + if (m1 <= n1 - lb && m2 <= n2 - lb) { + CHARSET_INFO* cs = (CHARSET_INFO*)(info); + // compare with space padding + int k = (*cs->coll->strnncollsp)(cs, v1 + lb, m1, v2 + lb, m2, false); + return k < 0 ? -1 : k > 0 ? +1 : 0; + } + // treat bad data as NULL + if (m1 > n1 - lb && m2 <= n2 - lb) + return -1; + if (m1 <= n1 - lb && m2 > n2 - lb) + return +1; + return 0; +} + +int +NdbSqlUtil::cmpLongvarbinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full) +{ + const unsigned lb = 2; + if (n2 >= lb) { + assert(n1 >= lb); + const uchar* v1 = (const uchar*)p1; + const uchar* v2 = (const uchar*)p2; + unsigned m1 = uint2korr(v1); + unsigned m2 = uint2korr(v2); + if (m1 <= n1 - lb && m2 <= n2 - lb) { + // compare as binary strings + unsigned m = (m1 <= m2 ? m1 : m2); + int k = memcmp(v1 + lb, v2 + lb, m); + if (k == 0) { + k = (full ? m1 : m) - m2; + } + return k < 0 ? -1 : k > 0 ? +1 : full ? 0 : CmpUnknown; + } + // treat bad data as NULL + if (m1 > n1 - lb && m2 <= n2 - lb) + return -1; + if (m1 <= n1 - lb && m2 > n2 - lb) + return +1; + return 0; + } + assert(! 
full); + return CmpUnknown; +} + // check charset bool @@ -508,8 +619,6 @@ NdbSqlUtil::usable_in_pk(Uint32 typeId, const void* info) } break; case Type::Undefined: - case Type::Varchar: - case Type::Varbinary: case Type::Blob: case Type::Text: break; @@ -545,8 +654,6 @@ NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info) } break; case Type::Undefined: - case Type::Varchar: - case Type::Varbinary: case Type::Blob: case Type::Text: break; @@ -555,3 +662,68 @@ NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info) } return false; } + +// utilities + +bool +NdbSqlUtil::get_var_length(Uint32 typeId, const void* p, unsigned attrlen, Uint32& lb, Uint32& len) +{ + const unsigned char* const src = (const unsigned char*)p; + switch (typeId) { + case NdbSqlUtil::Type::Varchar: + case NdbSqlUtil::Type::Varbinary: + lb = 1; + if (attrlen >= lb) { + len = src[0]; + if (attrlen >= lb + len) + return true; + } + break; + case NdbSqlUtil::Type::Longvarchar: + case NdbSqlUtil::Type::Longvarbinary: + lb = 2; + if (attrlen >= lb) { + len = src[0] + (src[1] << 8); + if (attrlen >= lb + len) + return true; + } + break; + default: + lb = 0; + len = attrlen; + return true; + break; + } + return false; +} + +// workaround + +int +NdbSqlUtil::strnxfrm_bug7284(CHARSET_INFO* cs, unsigned char* dst, unsigned dstLen, const unsigned char*src, unsigned srcLen) +{ + unsigned char nsp[20]; // native space char + unsigned char xsp[20]; // strxfrm-ed space char +#ifdef VM_TRACE + memset(nsp, 0x1f, sizeof(nsp)); + memset(xsp, 0x1f, sizeof(xsp)); +#endif + // convert from unicode codepoint for space + int n1 = (*cs->cset->wc_mb)(cs, (my_wc_t)0x20, nsp, nsp + sizeof(nsp)); + if (n1 <= 0) + return -1; + // strxfrm to binary + int n2 = (*cs->coll->strnxfrm)(cs, xsp, sizeof(xsp), nsp, n1); + if (n2 <= 0) + return -1; + // strxfrm argument string - returns no error indication + int n3 = (*cs->coll->strnxfrm)(cs, dst, dstLen, src, srcLen); + // pad with strxfrm-ed space chars + 
int n4 = n3; + while (n4 < (int)dstLen) { + dst[n4] = xsp[(n4 - n3) % n2]; + n4++; + } + // no check for partial last + return dstLen; +} diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index 0db74a0b709..7033aecccf8 100644 --- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -1861,12 +1861,18 @@ Dbacc::xfrmKeyData(Signal* signal) dstWords = srcWords; } else { jam(); + Uint32 typeId = AttributeDescriptor::getType(keyAttr.attributeDescriptor); + Uint32 lb, len; + bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len); + ndbrequire(ok); Uint32 xmul = cs->strxfrm_multiply; if (xmul == 0) xmul = 1; - Uint32 dstLen = xmul * srcBytes; + // see comment in DbtcMain.cpp + Uint32 dstLen = xmul * (srcBytes - lb); ndbrequire(dstLen <= ((dstSize - dstPos) << 2)); - uint n = (*cs->coll->strnxfrm)(cs, dstPtr, dstLen, srcPtr, srcBytes); + int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); + ndbrequire(n != -1); while ((n & 3) != 0) dstPtr[n++] = 0; dstWords = (n >> 2); diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 96995df44d5..709cd8a4c0f 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -4827,9 +4827,7 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it, } } - /** - * Ignore incoming old-style type and recompute it. 
- */ + // compute attribute size and array size bool translateOk = attrDesc.translateExtType(); tabRequire(translateOk, CreateTableRef::Inconsistency); @@ -7849,7 +7847,7 @@ void Dbdict::createEventUTIL_EXECUTE(Signal *signal, break; case ZALREADYEXIST: jam(); - evntRecPtr.p->m_errorCode = CreateEvntRef::EventExists; + evntRecPtr.p->m_errorCode = CreateEvntRef::EventNameExists; break; default: jam(); diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp index 60f57c60f1f..0472d25318b 100644 --- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp +++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp @@ -1446,7 +1446,7 @@ private: void gcpTcfinished(Signal* signal); void handleGcp(Signal* signal); void hash(Signal* signal); - Uint32 handle_special_hash(Uint32 dstHash[4], + bool handle_special_hash(Uint32 dstHash[4], Uint32* src, Uint32 srcLen, Uint32 tabPtrI, bool distr); diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index 2f371458b73..6d4ca2d9078 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -2314,7 +2314,7 @@ void Dbtc::hash(Signal* signal) }//if }//Dbtc::hash() -Uint32 +bool Dbtc::handle_special_hash(Uint32 dstHash[4], Uint32* src, Uint32 srcLen, Uint32 tabPtrI, bool distr) @@ -2349,17 +2349,26 @@ Dbtc::handle_special_hash(Uint32 dstHash[4], Uint32* src, Uint32 srcLen, dstWords = srcWords; } else { jam(); + Uint32 typeId = + AttributeDescriptor::getType(keyAttr.attributeDescriptor); + Uint32 lb, len; + bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len); + ndbrequire(ok); Uint32 xmul = cs->strxfrm_multiply; if (xmul == 0) xmul = 1; - Uint32 dstLen = xmul * srcBytes; + /* + * Varchar is really Char. End spaces do not matter. To get + * same hash we blank-pad to maximum length via strnxfrm. 
+ * TODO use MySQL charset-aware hash function instead + */ + Uint32 dstLen = xmul * (srcBytes - lb); ndbrequire(dstLen <= ((dstSize - dstPos) << 2)); - uint n = (*cs->coll->strnxfrm)(cs, dstPtr, dstLen, srcPtr, srcBytes); + uint n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); while ((n & 3) != 0) { dstPtr[n++] = 0; } dstWords = (n >> 2); - } dstPos += dstWords; srcPos += srcWords; @@ -2418,6 +2427,7 @@ Dbtc::handle_special_hash(Uint32 dstHash[4], Uint32* src, Uint32 srcLen, md5_hash(tmp, (Uint64*)dst, dstPos); dstHash[1] = tmp[1]; } + return true; // success } /* diff --git a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp index 015d555e67d..06b2b3f4cb4 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp @@ -363,25 +363,25 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer, ljam(); Tablerec* regTabPtr = tabptr.p; Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(attrDescriptor); + uchar* dstPtr = (uchar*)&outBuffer[indexBuf]; + const uchar* srcPtr = (uchar*)&tTupleHeader[readOffset]; Uint32 i = AttributeOffset::getCharsetPos(attrDes2); ndbrequire(i < regTabPtr->noOfCharsets); CHARSET_INFO* cs = regTabPtr->charsetArray[i]; - Uint32 xmul = cs->strxfrm_multiply; - if (xmul == 0) - xmul = 1; - Uint32 dstLen = xmul * srcBytes; - Uint32 maxIndexBuf = indexBuf + (dstLen >> 2); - if (maxIndexBuf <= maxRead) { - ljam(); - uchar* dstPtr = (uchar*)&outBuffer[indexBuf]; - const uchar* srcPtr = (uchar*)&tTupleHeader[readOffset]; - const char* ssrcPtr = (const char*)srcPtr; - // could verify data format optionally - if (true || - (*cs->cset->well_formed_len)(cs, ssrcPtr, ssrcPtr + srcBytes, ZNIL) == srcBytes) { + Uint32 typeId = AttributeDescriptor::getType(attrDescriptor); + Uint32 lb, len; + bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len); + if (ok) { + Uint32 xmul = cs->strxfrm_multiply; + if (xmul == 0) + xmul = 
1; + // see comment in DbtcMain.cpp + Uint32 dstLen = xmul * (srcBytes - lb); + Uint32 maxIndexBuf = indexBuf + (dstLen >> 2); + if (maxIndexBuf <= maxRead) { ljam(); - // normalize - Uint32 n = (*cs->coll->strnxfrm)(cs, dstPtr, dstLen, srcPtr, srcBytes); + int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); + ndbrequire(n != -1); while ((n & 3) != 0) { dstPtr[n++] = 0; } @@ -393,11 +393,11 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer, return true; } else { ljam(); - terrorCode = ZTUPLE_CORRUPTED_ERROR; + terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; } } else { ljam(); - terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR; + terrorCode = ZTUPLE_CORRUPTED_ERROR; } } return false; @@ -814,10 +814,15 @@ Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer, // not const in MySQL CHARSET_INFO* cs = regTabPtr->charsetArray[i]; const char* ssrc = (const char*)&inBuffer[tInBufIndex + 1]; + Uint32 lb, len; + if (! NdbSqlUtil::get_var_length(typeId, ssrc, bytes, lb, len)) { + ljam(); + terrorCode = ZINVALID_CHAR_FORMAT; + return false; + } // fast fix bug#7340 if (typeId != NDB_TYPE_TEXT && - (*cs->cset->well_formed_len)(cs, ssrc, ssrc+bytes, ZNIL) != bytes) - { + (*cs->cset->well_formed_len)(cs, ssrc + lb, ssrc + lb + len, ZNIL) != len) { ljam(); terrorCode = ZINVALID_CHAR_FORMAT; return false; diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp index 84081e76a8c..a61b7c1f5ca 100644 --- a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp +++ b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp @@ -177,18 +177,29 @@ Dbtux::execTUX_BOUND_INFO(Signal* signal) dstWords = srcWords; } else { jam(); + Uint32 typeId = descAttr.m_typeId; + Uint32 lb, len; + bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len); + if (! 
ok) { + jam(); + scan.m_state = ScanOp::Invalid; + sig->errorCode = TuxBoundInfo::InvalidCharFormat; + return; + } CHARSET_INFO* cs = all_charsets[descAttr.m_charset]; Uint32 xmul = cs->strxfrm_multiply; if (xmul == 0) xmul = 1; - Uint32 dstLen = xmul * srcBytes; + // see comment in DbtcMain.cpp + Uint32 dstLen = xmul * (srcBytes - lb); if (dstLen > ((dstSize - dstPos) << 2)) { jam(); scan.m_state = ScanOp::Invalid; sig->errorCode = TuxBoundInfo::TooMuchAttrInfo; return; } - Uint32 n = (*cs->coll->strnxfrm)(cs, dstPtr, dstLen, srcPtr, srcBytes); + int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len); + ndbrequire(n != -1); while ((n & 3) != 0) { dstPtr[n++] = 0; } diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp index 4a8b79d3ddc..a638b85426e 100644 --- a/ndb/src/mgmsrv/main.cpp +++ b/ndb/src/mgmsrv/main.cpp @@ -155,6 +155,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), "Tcp connections will now be used instead\n"); opt_ndb_shm= 0; #endif + break; case '?': usage(); exit(0); diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp index baa34b2006e..5ffb087cde7 100644 --- a/ndb/src/ndbapi/Ndb.cpp +++ b/ndb/src/ndbapi/Ndb.cpp @@ -1177,7 +1177,14 @@ NdbEventOperation* Ndb::createEventOperation(const char* eventName, tOp = new NdbEventOperation(this, eventName, bufferLength); - if (tOp->getState() != NdbEventOperation::CREATED) { + if (tOp == 0) + { + theError.code= 4000; + return NULL; + } + + if (tOp->getState() != NdbEventOperation::EO_CREATED) { + theError.code= tOp->getNdbError().code; delete tOp; tOp = NULL; } diff --git a/ndb/src/ndbapi/NdbBlob.cpp b/ndb/src/ndbapi/NdbBlob.cpp index 86b161e3b9b..0638f6e4c51 100644 --- a/ndb/src/ndbapi/NdbBlob.cpp +++ b/ndb/src/ndbapi/NdbBlob.cpp @@ -875,7 +875,7 @@ NdbBlob::readParts(char* buf, Uint32 part, Uint32 count) setErrorCode(tOp); return -1; } - tOp->m_abortOption = AbortOnError; + tOp->m_abortOption = NdbTransaction::AbortOnError; buf += 
thePartSize; n++; thePendingBlobOps |= (1 << NdbOperation::ReadRequest); @@ -898,7 +898,7 @@ NdbBlob::insertParts(const char* buf, Uint32 part, Uint32 count) setErrorCode(tOp); return -1; } - tOp->m_abortOption = AbortOnError; + tOp->m_abortOption = NdbTransaction::AbortOnError; buf += thePartSize; n++; thePendingBlobOps |= (1 << NdbOperation::InsertRequest); @@ -921,7 +921,7 @@ NdbBlob::updateParts(const char* buf, Uint32 part, Uint32 count) setErrorCode(tOp); return -1; } - tOp->m_abortOption = AbortOnError; + tOp->m_abortOption = NdbTransaction::AbortOnError; buf += thePartSize; n++; thePendingBlobOps |= (1 << NdbOperation::UpdateRequest); @@ -943,7 +943,7 @@ NdbBlob::deleteParts(Uint32 part, Uint32 count) setErrorCode(tOp); return -1; } - tOp->m_abortOption = AbortOnError; + tOp->m_abortOption = NdbTransaction::AbortOnError; n++; thePendingBlobOps |= (1 << NdbOperation::DeleteRequest); theNdbCon->thePendingBlobOps |= (1 << NdbOperation::DeleteRequest); @@ -976,11 +976,11 @@ NdbBlob::deletePartsUnknown(Uint32 part) setErrorCode(tOp); return -1; } - tOp->m_abortOption = AO_IgnoreError; + tOp->m_abortOption= NdbTransaction::AO_IgnoreError; n++; } DBG("deletePartsUnknown: executeNoBlobs [in] bat=" << bat); - if (theNdbCon->executeNoBlobs(NoCommit) == -1) + if (theNdbCon->executeNoBlobs(NdbTransaction::NoCommit) == -1) return -1; DBG("deletePartsUnknown: executeNoBlobs [out]"); n = 0; @@ -1012,7 +1012,7 @@ NdbBlob::executePendingBlobReads() Uint8 flags = (1 << NdbOperation::ReadRequest); if (thePendingBlobOps & flags) { DBG("executePendingBlobReads: executeNoBlobs [in]"); - if (theNdbCon->executeNoBlobs(NoCommit) == -1) + if (theNdbCon->executeNoBlobs(NdbTransaction::NoCommit) == -1) return -1; DBG("executePendingBlobReads: executeNoBlobs [out]"); thePendingBlobOps = 0; @@ -1027,7 +1027,7 @@ NdbBlob::executePendingBlobWrites() Uint8 flags = 0xFF & ~(1 << NdbOperation::ReadRequest); if (thePendingBlobOps & flags) { DBG("executePendingBlobWrites: executeNoBlobs 
[in]"); - if (theNdbCon->executeNoBlobs(NoCommit) == -1) + if (theNdbCon->executeNoBlobs(NdbTransaction::NoCommit) == -1) return -1; DBG("executePendingBlobWrites: executeNoBlobs [out]"); thePendingBlobOps = 0; @@ -1175,7 +1175,7 @@ NdbBlob::atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl * back after postExecute. */ int -NdbBlob::preExecute(ExecType anExecType, bool& batch) +NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch) { DBG("preExecute [in]"); if (theState == Invalid) @@ -1224,7 +1224,7 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch) return -1; } if (isWriteOp()) { - tOp->m_abortOption = AO_IgnoreError; + tOp->m_abortOption = NdbTransaction::AO_IgnoreError; } theHeadInlineReadOp = tOp; // execute immediately @@ -1270,7 +1270,7 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch) return -1; } if (isWriteOp()) { - tOp->m_abortOption = AO_IgnoreError; + tOp->m_abortOption = NdbTransaction::AO_IgnoreError; } theHeadInlineReadOp = tOp; // execute immediately @@ -1316,18 +1316,18 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch) * any remaining prepared operations. */ int -NdbBlob::postExecute(ExecType anExecType) +NdbBlob::postExecute(NdbTransaction::ExecType anExecType) { DBG("postExecute [in] type=" << anExecType); if (theState == Invalid) return -1; if (theState == Active) { - setState(anExecType == NoCommit ? Active : Closed); + setState(anExecType == NdbTransaction::NoCommit ? Active : Closed); DBG("postExecute [skip]"); return 0; } assert(theState == Prepared); - setState(anExecType == NoCommit ? Active : Closed); + setState(anExecType == NdbTransaction::NoCommit ? 
Active : Closed); assert(isKeyOp()); if (isIndexOp()) { NdbBlob* tFirstBlob = theNdbOp->theBlobList; @@ -1343,14 +1343,15 @@ NdbBlob::postExecute(ExecType anExecType) return -1; if (theGetFlag) { assert(theGetSetBytes == 0 || theGetBuf != 0); - assert(theGetSetBytes <= theInlineSize || anExecType == NoCommit); + assert(theGetSetBytes <= theInlineSize || + anExecType == NdbTransaction::NoCommit); Uint32 bytes = theGetSetBytes; if (readDataPrivate(theGetBuf, bytes) == -1) return -1; } } if (isUpdateOp()) { - assert(anExecType == NoCommit); + assert(anExecType == NdbTransaction::NoCommit); getHeadFromRecAttr(); if (theSetFlag) { // setValue overwrites everything @@ -1367,7 +1368,7 @@ NdbBlob::postExecute(ExecType anExecType) } } if (isWriteOp() && isTableOp()) { - assert(anExecType == NoCommit); + assert(anExecType == NdbTransaction::NoCommit); if (theHeadInlineReadOp->theError.code == 0) { int tNullFlag = theNullFlag; Uint64 tLength = theLength; @@ -1418,18 +1419,18 @@ NdbBlob::postExecute(ExecType anExecType) } } if (isDeleteOp()) { - assert(anExecType == NoCommit); + assert(anExecType == NdbTransaction::NoCommit); getHeadFromRecAttr(); if (deleteParts(0, getPartCount()) == -1) return -1; } - setState(anExecType == NoCommit ? Active : Closed); + setState(anExecType == NdbTransaction::NoCommit ? 
Active : Closed); // activation callback if (theActiveHook != NULL) { if (invokeActiveHook() == -1) return -1; } - if (anExecType == NoCommit && theHeadInlineUpdateFlag) { + if (anExecType == NdbTransaction::NoCommit && theHeadInlineUpdateFlag) { NdbOperation* tOp = theNdbCon->getNdbOperation(theTable); if (tOp == NULL || tOp->updateTuple() == -1 || @@ -1438,7 +1439,7 @@ NdbBlob::postExecute(ExecType anExecType) setErrorCode(NdbBlobImpl::ErrAbort); return -1; } - tOp->m_abortOption = AbortOnError; + tOp->m_abortOption = NdbTransaction::AbortOnError; DBG("added op to update head+inline"); } DBG("postExecute [out]"); @@ -1468,7 +1469,7 @@ NdbBlob::preCommit() setErrorCode(NdbBlobImpl::ErrAbort); return -1; } - tOp->m_abortOption = AbortOnError; + tOp->m_abortOption = NdbTransaction::AbortOnError; DBG("added op to update head+inline"); } } diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index b7a4419a491..db912995b5f 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ b/ndb/src/ndbapi/NdbDictionary.cpp @@ -585,6 +585,13 @@ NdbDictionary::Event::Event(const char * name) setName(name); } +NdbDictionary::Event::Event(const char * name, const Table& table) + : m_impl(* new NdbEventImpl(* this)) +{ + setName(name); + setTable(table); +} + NdbDictionary::Event::Event(NdbEventImpl & impl) : m_impl(impl) { @@ -604,12 +611,30 @@ NdbDictionary::Event::setName(const char * name) m_impl.setName(name); } +const char * +NdbDictionary::Event::getName() const +{ + return m_impl.getName(); +} + +void +NdbDictionary::Event::setTable(const Table& table) +{ + m_impl.setTable(table); +} + void NdbDictionary::Event::setTable(const char * table) { m_impl.setTable(table); } +const char* +NdbDictionary::Event::getTableName() const +{ + return m_impl.getTableName(); +} + void NdbDictionary::Event::addTableEvent(const TableEvent t) { @@ -617,11 +642,17 @@ NdbDictionary::Event::addTableEvent(const TableEvent t) } void -NdbDictionary::Event::setDurability(const 
EventDurability d) +NdbDictionary::Event::setDurability(EventDurability d) { m_impl.setDurability(d); } +NdbDictionary::Event::EventDurability +NdbDictionary::Event::getDurability() const +{ + return m_impl.getDurability(); +} + void NdbDictionary::Event::addColumn(const Column & c){ NdbColumnImpl* col = new NdbColumnImpl; @@ -649,6 +680,11 @@ NdbDictionary::Event::addEventColumns(int n, const char ** names) addEventColumn(names[i]); } +int NdbDictionary::Event::getNoOfEventColumns() const +{ + return m_impl.getNoOfEventColumns(); +} + NdbDictionary::Object::Status NdbDictionary::Event::getObjectStatus() const { @@ -823,6 +859,12 @@ NdbDictionary::Dictionary::listObjects(List& list, Object::Type type) return m_impl.listObjects(list, type); } +int +NdbDictionary::Dictionary::listObjects(List& list, Object::Type type) const +{ + return m_impl.listObjects(list, type); +} + int NdbDictionary::Dictionary::listIndexes(List& list, const char * tableName) { @@ -834,6 +876,18 @@ NdbDictionary::Dictionary::listIndexes(List& list, const char * tableName) return m_impl.listIndexes(list, tab->getTableId()); } +int +NdbDictionary::Dictionary::listIndexes(List& list, + const char * tableName) const +{ + const NdbDictionary::Table* tab= getTable(tableName); + if(tab == 0) + { + return -1; + } + return m_impl.listIndexes(list, tab->getTableId()); +} + const struct NdbError & NdbDictionary::Dictionary::getNdbError() const { return m_impl.getNdbError(); @@ -919,6 +973,12 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col) case NdbDictionary::Column::Bit: out << "Bit(" << col.getLength() << ")"; break; + case NdbDictionary::Column::Longvarchar: + out << "Longvarchar(" << col.getLength() << ";" << csname << ")"; + break; + case NdbDictionary::Column::Longvarbinary: + out << "Longvarbinary(" << col.getLength() << ")"; + break; default: out << "Type" << (Uint32)col.getType(); break; diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 
2d37c7883ec..13f9d0c48e1 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -143,6 +143,24 @@ NdbColumnImpl::init(Type t) m_length = 4; m_cs = default_cs; break; + case Bit: + m_precision = 0; + m_scale = 0; + m_length = 1; + m_cs = NULL; + break; + case Longvarchar: + m_precision = 0; + m_scale = 0; + m_length = 1; // legal + m_cs = default_cs; + break; + case Longvarbinary: + m_precision = 0; + m_scale = 0; + m_length = 1; // legal + m_cs = NULL; + break; case Undefined: assert(false); break; @@ -546,24 +564,30 @@ void NdbEventImpl::setName(const char * name) m_externalName.assign(name); } +const char *NdbEventImpl::getName() const +{ + return m_externalName.c_str(); +} + +void +NdbEventImpl::setTable(const NdbDictionary::Table& table) +{ + m_tableImpl= &NdbTableImpl::getImpl(table); + m_tableName.assign(m_tableImpl->getName()); +} + void NdbEventImpl::setTable(const char * table) { m_tableName.assign(table); } -const char * -NdbEventImpl::getTable() const +const char * +NdbEventImpl::getTableName() const { return m_tableName.c_str(); } -const char * -NdbEventImpl::getName() const -{ - return m_externalName.c_str(); -} - void NdbEventImpl::addTableEvent(const NdbDictionary::Event::TableEvent t = NdbDictionary::Event::TE_ALL) { @@ -581,6 +605,17 @@ NdbEventImpl::setDurability(const NdbDictionary::Event::EventDurability d) m_dur = d; } +NdbDictionary::Event::EventDurability +NdbEventImpl::getDurability() const +{ + return m_dur; +} + +int NdbEventImpl::getNoOfEventColumns() const +{ + return m_attrIds.size() + m_columns.size(); +} + /** * NdbDictionaryImpl */ @@ -941,7 +976,7 @@ NdbDictInterface::dictSignal(NdbApiSignal* signal, for (int j=0; j < noerrcodes; j++) if(m_error.code == errcodes[j]) { doContinue = 1; - continue; + break; } if (doContinue) continue; @@ -1007,12 +1042,14 @@ NdbDictInterface::getTable(class NdbApiSignal * signal, Uint32 noOfSections, bool fullyQualifiedNames) { //GetTabInfoReq * const req = 
CAST_PTR(GetTabInfoReq, signal->getDataPtrSend()); + int errCodes[] = {GetTabInfoRef::Busy }; + int r = dictSignal(signal,ptr,noOfSections, 0/*do not use masternode id*/, 100, WAIT_GET_TAB_INFO_REQ, WAITFOR_RESPONSE_TIMEOUT, - NULL,0); + errCodes, 1); if (r) return 0; NdbTableImpl * rt = 0; @@ -1149,6 +1186,8 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, const Uint32 * data, Uint32 len, bool fullyQualifiedNames) { + DBUG_ENTER("NdbDictInterface::parseTableInfo"); + SimplePropertiesLinearReader it(data, len); DictTabInfo::Table tableDesc; tableDesc.init(); SimpleProperties::UnpackStatus s; @@ -1158,7 +1197,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, true, true); if(s != SimpleProperties::Break){ - return 703; + DBUG_RETURN(703); } const char * internalName = tableDesc.TableName; const char * externalName = Ndb::externalizeTableName(internalName, fullyQualifiedNames); @@ -1209,15 +1248,17 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, true, true); if(s != SimpleProperties::Break){ delete impl; - return 703; + DBUG_RETURN(703); } NdbColumnImpl * col = new NdbColumnImpl(); col->m_attrId = attrDesc.AttributeId; col->setName(attrDesc.AttributeName); - if (attrDesc.AttributeExtType >= NDB_TYPE_MAX) { + + // check type and compute attribute size and array size + if (! attrDesc.translateExtType()) { delete impl; - return 703; + DBUG_RETURN(703); } col->m_type = (NdbDictionary::Column::Type)attrDesc.AttributeExtType; col->m_precision = (attrDesc.AttributeExtPrecision & 0xFFFF); @@ -1228,21 +1269,15 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, // charset is defined exactly for char types if (col->getCharType() != (cs_number != 0)) { delete impl; - return 703; + DBUG_RETURN(703); } if (col->getCharType()) { col->m_cs = get_charset(cs_number, MYF(0)); if (col->m_cs == NULL) { delete impl; - return 743; + DBUG_RETURN(743); } } - - // translate to old kernel types and sizes - if (! 
attrDesc.translateExtType()) { - delete impl; - return 703; - } col->m_attrSize = (1 << attrDesc.AttributeSize) / 8; col->m_arraySize = attrDesc.AttributeArraySize; if(attrDesc.AttributeSize == 0) @@ -1275,7 +1310,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, if(impl->m_columns[attrDesc.AttributeId] != 0){ delete col; delete impl; - return 703; + DBUG_RETURN(703); } impl->m_columns[attrDesc.AttributeId] = col; it.next(); @@ -1286,7 +1321,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, impl->m_noOfBlobs = blobCount; impl->m_noOfDistributionKeys = distKeys; * ret = impl; - return 0; + DBUG_RETURN(0); } /***************************************************************** @@ -1446,7 +1481,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, if (col->m_autoIncrement) { if (haveAutoIncrement) { m_error.code = 4335; - return -1; + DBUG_RETURN(-1); } haveAutoIncrement = true; autoIncrementValue = col->m_autoIncrementInitialValue; @@ -1496,14 +1531,16 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, tmpAttr.AttributeNullableFlag = col->m_nullable; tmpAttr.AttributeDKey = col->m_distributionKey; - if (col->m_type >= NDB_TYPE_MAX) { - m_error.code = 703; - return -1; - } tmpAttr.AttributeExtType = (Uint32)col->m_type; tmpAttr.AttributeExtPrecision = ((unsigned)col->m_precision & 0xFFFF); tmpAttr.AttributeExtScale = col->m_scale; tmpAttr.AttributeExtLength = col->m_length; + + // check type and compute attribute size and array size + if (! 
tmpAttr.translateExtType()) { + m_error.code = 703; + DBUG_RETURN(-1); + } // charset is defined exactly for char types if (col->getCharType() != (col->m_cs != NULL)) { m_error.code = 703; @@ -1517,16 +1554,13 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, // distribution key not supported for Char attribute if (col->m_distributionKey && col->m_cs != NULL) { m_error.code = 745; - return -1; + DBUG_RETURN(-1); } // charset in upper half of precision if (col->getCharType()) { tmpAttr.AttributeExtPrecision |= (col->m_cs->number << 16); } - // DICT will ignore and recompute this - (void)tmpAttr.translateExtType(); - tmpAttr.AttributeAutoIncrement = col->m_autoIncrement; BaseString::snprintf(tmpAttr.AttributeDefaultValue, sizeof(tmpAttr.AttributeDefaultValue), @@ -1571,7 +1605,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, ret= createTable(&tSignal, ptr); if (ret) - return ret; + DBUG_RETURN(ret); if (haveAutoIncrement) { if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(), @@ -2231,13 +2265,12 @@ int NdbDictionaryImpl::createEvent(NdbEventImpl & evnt) { int i; - NdbTableImpl* tab = getTable(evnt.getTable()); + NdbTableImpl* tab = getTable(evnt.getTableName()); if(tab == 0){ - // m_error.code = 3249; - ndbout_c(":createEvent: table %s not found", evnt.getTable()); #ifdef EVENT_DEBUG - ndbout_c("NdbDictionaryImpl::createEvent: table not found: %s", evnt.getTable()); + ndbout_c("NdbDictionaryImpl::createEvent: table not found: %s", + evnt.getTableName()); #endif return -1; } @@ -2259,7 +2292,8 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt) evnt.m_facade->addColumn(*(col_impl->m_facade)); } else { ndbout_c("Attr id %u in table %s not found", evnt.m_attrIds[i], - evnt.getTable()); + evnt.getTableName()); + m_error.code= 4713; return -1; } } @@ -2517,8 +2551,8 @@ NdbDictionaryImpl::getEvent(const char * eventName) } // We only have the table name with internal name - ev->setTable(m_ndb.externalizeTableName(ev->getTable())); - ev->m_tableImpl = 
getTable(ev->getTable()); + ev->setTable(m_ndb.externalizeTableName(ev->getTableName())); + ev->m_tableImpl = getTable(ev->getTableName()); // get the columns from the attrListBitmask @@ -2601,6 +2635,7 @@ void NdbDictInterface::execSUB_STOP_CONF(NdbApiSignal * signal, LinearSectionPtr ptr[3]) { + DBUG_ENTER("NdbDictInterface::execSUB_STOP_REF"); #ifdef EVENT_DEBUG ndbout << "Got GSN_SUB_STOP_CONF" << endl; #endif @@ -2617,17 +2652,21 @@ void NdbDictInterface::execSUB_STOP_REF(NdbApiSignal * signal, LinearSectionPtr ptr[3]) { + DBUG_ENTER("NdbDictInterface::execSUB_STOP_REF"); #ifdef EVENT_DEBUG ndbout << "Got GSN_SUB_STOP_REF" << endl; #endif - // SubRemoveConf * const sumaRemoveRef = CAST_CONSTPTR(SubRemoveRef, signal->getDataPtr()); + const SubRemoveRef * const sumaRemoveRef= + CAST_CONSTPTR(SubRemoveRef, signal->getDataPtr()); // Uint32 subscriptionId = sumaRemoveRef->subscriptionId; // Uint32 subscriptionKey = sumaRemoveRef->subscriptionKey; // Uint32 senderData = sumaRemoveRef->senderData; - m_error.code = 1; + m_error.code= sumaRemoveRef->errorCode; m_waiter.signal(NO_WAIT); + + DBUG_VOID_RETURN; } void diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index 5a32349cda5..619e5c1f36e 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -195,11 +195,14 @@ public: void setName(const char * name); const char * getName() const; + void setTable(const NdbDictionary::Table& table); void setTable(const char * table); - const char * getTable() const; + const char * getTableName() const; void addTableEvent(const NdbDictionary::Event::TableEvent t); - void setDurability(const NdbDictionary::Event::EventDurability d); + void setDurability(NdbDictionary::Event::EventDurability d); + NdbDictionary::Event::EventDurability getDurability() const; void addEventColumn(const NdbColumnImpl &c); + int getNoOfEventColumns() const; void print() { ndbout_c("NdbEventImpl: id=%d, key=%d", @@ -448,7 +451,8 
@@ bool NdbColumnImpl::getCharType() const { return (m_type == NdbDictionary::Column::Char || m_type == NdbDictionary::Column::Varchar || - m_type == NdbDictionary::Column::Text); + m_type == NdbDictionary::Column::Text || + m_type == NdbDictionary::Column::Longvarchar); } inline diff --git a/ndb/src/ndbapi/NdbEventOperation.cpp b/ndb/src/ndbapi/NdbEventOperation.cpp index d209293f8b0..e99cad918c5 100644 --- a/ndb/src/ndbapi/NdbEventOperation.cpp +++ b/ndb/src/ndbapi/NdbEventOperation.cpp @@ -15,23 +15,9 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/***************************************************************************** - * Name: NdbEventOperation.cpp - * Include: - * Link: - * Author: Tomas Ulin MySQL AB - * Date: 2003-11-21 - * Version: 0.1 - * Description: Event support - * Documentation: - * Adjust: 2003-11-21 Tomas Ulin First version. - ****************************************************************************/ - #include -#include +#include #include -#include -#include #include "NdbEventOperationImpl.hpp" #include "NdbDictionaryImpl.hpp" @@ -123,3 +109,7 @@ NdbEventOperation::wait(void *p, int aMillisecondNumber) NdbEventOperation::NdbEventOperation(NdbEventOperationImpl& impl) : m_impl(impl) {} +const struct NdbError & +NdbEventOperation::getNdbError() const { + return m_impl.getNdbError(); +} diff --git a/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/ndb/src/ndbapi/NdbEventOperationImpl.cpp index b3fac64d1c4..69c05dcb0b7 100644 --- a/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -55,9 +55,8 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N, const char* eventName, const int bufferLength) : NdbEventOperation(*this), m_ndb(theNdb), - m_state(ERROR), m_bufferL(bufferLength) + m_state(EO_ERROR), m_bufferL(bufferLength) { - m_eventId = 0; theFirstRecAttrs[0] = NULL; theCurrentRecAttrs[0] = NULL; @@ -71,16 +70,15 @@ 
NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N, // we should lookup id in Dictionary, TODO // also make sure we only have one listener on each event - if (!m_ndb) { ndbout_c("m_ndb=NULL"); return; } + if (!m_ndb) abort(); NdbDictionary::Dictionary *myDict = m_ndb->getDictionary(); - if (!myDict) { ndbout_c("getDictionary=NULL"); return; } + if (!myDict) { m_error.code= m_ndb->getNdbError().code; return; } const NdbDictionary::Event *myEvnt = myDict->getEvent(eventName); - if (!myEvnt) { ndbout_c("getEvent()=NULL"); return; } + if (!myEvnt) { m_error.code= myDict->getNdbError().code; return; } m_eventImpl = &myEvnt->m_impl; - if (!m_eventImpl) { ndbout_c("m_impl=NULL"); return; } m_bufferHandle = m_ndb->getGlobalEventBufferHandle(); if (m_bufferHandle->m_bufferL > 0) @@ -88,7 +86,7 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N, else m_bufferHandle->m_bufferL = m_bufferL; - m_state = CREATED; + m_state = EO_CREATED; } NdbEventOperationImpl::~NdbEventOperationImpl() @@ -106,7 +104,7 @@ NdbEventOperationImpl::~NdbEventOperationImpl() p = p_next; } } - if (m_state == NdbEventOperation::EXECUTING) { + if (m_state == EO_EXECUTING) { stop(); // m_bufferHandle->dropSubscribeEvent(m_bufferId); ; // We should send stop signal here @@ -122,7 +120,7 @@ NdbEventOperationImpl::getState() NdbRecAttr* NdbEventOperationImpl::getValue(const char *colName, char *aValue, int n) { - if (m_state != NdbEventOperation::CREATED) { + if (m_state != EO_CREATED) { ndbout_c("NdbEventOperationImpl::getValue may only be called between instantiation and execute()"); return NULL; } @@ -211,8 +209,8 @@ NdbEventOperationImpl::execute() { NdbDictionary::Dictionary *myDict = m_ndb->getDictionary(); if (!myDict) { - ndbout_c("NdbEventOperation::execute(): getDictionary=NULL"); - return 0; + m_error.code= m_ndb->getNdbError().code; + return -1; } if (theFirstRecAttrs[0] == NULL) { // defaults to get all @@ -223,13 +221,17 @@ NdbEventOperationImpl::execute() int 
hasSubscriber; - m_bufferId = + int r= m_bufferHandle->prepareAddSubscribeEvent(m_eventImpl->m_eventId, hasSubscriber /* return value */); + m_error.code= 4709; - m_eventImpl->m_bufferId = m_bufferId; + if (r < 0) + return -1; - int r = -1; + m_eventImpl->m_bufferId = m_bufferId = (Uint32)r; + + r = -1; if (m_bufferId >= 0) { // now we check if there's already a subscriber @@ -241,14 +243,14 @@ NdbEventOperationImpl::execute() if (r) { //Error m_bufferHandle->unprepareAddSubscribeEvent(m_bufferId); - m_state = NdbEventOperation::ERROR; + m_state = EO_ERROR; } else { m_bufferHandle->addSubscribeEvent(m_bufferId, this); - m_state = NdbEventOperation::EXECUTING; + m_state = EO_EXECUTING; } } else { //Error - m_state = NdbEventOperation::ERROR; + m_state = EO_ERROR; } return r; } @@ -256,15 +258,16 @@ NdbEventOperationImpl::execute() int NdbEventOperationImpl::stop() { - if (m_state != NdbEventOperation::EXECUTING) - return -1; + DBUG_ENTER("NdbEventOperationImpl::stop"); + if (m_state != EO_EXECUTING) + DBUG_RETURN(-1); // ndbout_c("NdbEventOperation::stopping()"); NdbDictionary::Dictionary *myDict = m_ndb->getDictionary(); if (!myDict) { - ndbout_c("NdbEventOperation::stop(): getDictionary=NULL"); - return 0; + m_error.code= m_ndb->getNdbError().code; + DBUG_RETURN(-1); } NdbDictionaryImpl & myDictImpl = NdbDictionaryImpl::getImpl(*myDict); @@ -275,8 +278,8 @@ NdbEventOperationImpl::stop() hasSubscriber /* return value */); if (ret < 0) { - ndbout_c("prepareDropSubscribeEvent failed"); - return -1; + m_error.code= 4712; + DBUG_RETURN(-1); } // m_eventImpl->m_bufferId = m_bufferId; @@ -293,17 +296,17 @@ NdbEventOperationImpl::stop() if (r) { //Error m_bufferHandle->unprepareDropSubscribeEvent(m_bufferId); - m_state = NdbEventOperation::ERROR; + m_error.code= myDictImpl.m_error.code; + m_state = EO_ERROR; } else { #ifdef EVENT_DEBUG ndbout_c("NdbEventOperation::dropping()"); #endif m_bufferHandle->dropSubscribeEvent(m_bufferId); - m_state = NdbEventOperation::CREATED; 
+ m_state = EO_CREATED; } - - return r; + DBUG_RETURN(r); } bool @@ -363,11 +366,11 @@ NdbEventOperationImpl::next(int *pOverrun) #ifdef EVENT_DEBUG printf("after values sz=%u\n", ptr[1].sz); - for(int i=0; i < ptr[1].sz; i++) + for(int i=0; i < (int)ptr[1].sz; i++) printf ("H'%.8X ",ptr[1].p[i]); printf("\n"); printf("before values sz=%u\n", ptr[2].sz); - for(int i=0; i < ptr[2].sz; i++) + for(int i=0; i < (int)ptr[2].sz; i++) printf ("H'%.8X ",ptr[2].p[i]); printf("\n"); #endif @@ -871,6 +874,7 @@ int NdbGlobalEventBuffer::real_prepareAddSubscribeEvent (NdbGlobalEventBufferHandle *aHandle, Uint32 eventId, int& hasSubscriber) { + DBUG_ENTER("NdbGlobalEventBuffer::real_prepareAddSubscribeEvent"); int i; int bufferId = -1; @@ -900,7 +904,10 @@ NdbGlobalEventBuffer::real_prepareAddSubscribeEvent } else { ndbout_c("prepareAddSubscribeEvent: Can't accept more subscribers"); // add_drop_unlock(); - return -1; + DBUG_PRINT("error",("Can't accept more subscribers:" + " bufferId=%d, m_no=%d, m_max=%d", + bufferId, m_no, m_max)); + DBUG_RETURN(-1); } } @@ -947,7 +954,7 @@ NdbGlobalEventBuffer::real_prepareAddSubscribeEvent } else { ndbout_c("prepareAddSubscribeEvent: Can't accept more subscribers"); // add_drop_unlock(); - return -1; + DBUG_RETURN(-1); } } bufferId = NO_ID(ni, bufferId); @@ -976,7 +983,7 @@ NdbGlobalEventBuffer::real_prepareAddSubscribeEvent /* we now have a lock on the prepare so that no one can mess with this * unlock comes in unprepareAddSubscribeEvent or addSubscribeEvent */ - return bufferId; + DBUG_RETURN(bufferId); } void @@ -1039,6 +1046,7 @@ int NdbGlobalEventBuffer::real_prepareDropSubscribeEvent(int bufferId, int& hasSubscriber) { + DBUG_ENTER("NdbGlobalEventBuffer::real_prepareDropSubscribeEvent"); // add_drop_lock(); // only one thread can do add or drop at a time BufItem &b = m_buf[ID(bufferId)]; @@ -1054,9 +1062,9 @@ NdbGlobalEventBuffer::real_prepareDropSubscribeEvent(int bufferId, else if (n == 1) hasSubscriber = 0; else - return -1; + 
DBUG_RETURN(-1); - return 0; + DBUG_RETURN(0); } void diff --git a/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/ndb/src/ndbapi/NdbEventOperationImpl.hpp index f67c998e639..fae9dda45e4 100644 --- a/ndb/src/ndbapi/NdbEventOperationImpl.hpp +++ b/ndb/src/ndbapi/NdbEventOperationImpl.hpp @@ -14,21 +14,13 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/***************************************************************************** - * Name: NdbEventOperationImpl.hpp - * Include: - * Link: - * Author: Tomas Ulin MySQL AB - * Date: 2003-11-21 - * Version: 0.1 - * Description: Event support - * Documentation: - * Adjust: 2003-11-21 Tomas Ulin First version. - ****************************************************************************/ - #ifndef NdbEventOperationImpl_H #define NdbEventOperationImpl_H +#include +#include +#include + class NdbGlobalEventBufferHandle; class NdbEventOperationImpl : public NdbEventOperation { public: @@ -61,6 +53,9 @@ public: void print(); void printAll(); + const NdbError & getNdbError() const; + NdbError m_error; + Ndb *m_ndb; NdbEventImpl *m_eventImpl; NdbGlobalEventBufferHandle *m_bufferHandle; diff --git a/ndb/src/ndbapi/NdbOperationDefine.cpp b/ndb/src/ndbapi/NdbOperationDefine.cpp index ce4a28c1273..835e33dfb40 100644 --- a/ndb/src/ndbapi/NdbOperationDefine.cpp +++ b/ndb/src/ndbapi/NdbOperationDefine.cpp @@ -406,6 +406,14 @@ int NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, const char* aValuePassed, Uint32 len) { + DBUG_ENTER("NdbOperation::setValue"); + DBUG_PRINT("enter", ("col=%s op=%d val=0x%x len=%u", + tAttrInfo->m_name.c_str(), + theOperationType, + aValuePassed, len)); + if (aValuePassed != NULL) + DBUG_DUMP("value", (char*)aValuePassed, len); + int tReturnCode; Uint32 tAttrId; Uint32 tData; @@ -421,7 +429,7 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, ; } else { setErrorCodeAbort(4234); - return -1; + DBUG_RETURN(-1); 
}//if } else { if (tStatus == GetValue) { @@ -432,7 +440,7 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, // to set values in the tuple by setValue. //-------------------------------------------------------------------- if (insertATTRINFO(Interpreter::EXIT_OK) == -1){ - return -1; + DBUG_RETURN(-1); } theInterpretedSize = theTotalCurrAI_Len - (theInitialReadSize + 5); @@ -443,47 +451,47 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, // setValue used in the wrong context. Application coding error. //------------------------------------------------------------------- setErrorCodeAbort(4234); //Wrong error code - return -1; + DBUG_RETURN(-1); }//if theStatus = SetValueInterpreted; }//if } else if (tOpType == InsertRequest) { if ((theStatus != SetValue) && (theStatus != OperationDefined)) { setErrorCodeAbort(4234); - return -1; + DBUG_RETURN(-1); }//if } else if (tOpType == ReadRequest || tOpType == ReadExclusive) { setErrorCodeAbort(4504); - return -1; + DBUG_RETURN(-1); } else if (tOpType == DeleteRequest) { setErrorCodeAbort(4504); - return -1; + DBUG_RETURN(-1); } else if (tOpType == OpenScanRequest || tOpType == OpenRangeScanRequest) { setErrorCodeAbort(4228); - return -1; + DBUG_RETURN(-1); } else { //--------------------------------------------------------------------- // setValue with undefined operation type. // Probably application coding error. 
//--------------------------------------------------------------------- setErrorCodeAbort(4108); - return -1; + DBUG_RETURN(-1); }//if if (tAttrInfo == NULL) { setErrorCodeAbort(4004); - return -1; + DBUG_RETURN(-1); }//if if (tAttrInfo->m_pk) { if (theOperationType == InsertRequest) { - return equal_impl(tAttrInfo, aValuePassed, len); + DBUG_RETURN(equal_impl(tAttrInfo, aValuePassed, len)); } else { setErrorCodeAbort(4202); - return -1; + DBUG_RETURN(-1); }//if }//if if (len > 8000) { setErrorCodeAbort(4216); - return -1; + DBUG_RETURN(-1); }//if tAttrId = tAttrInfo->m_attrId; @@ -496,13 +504,13 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, insertATTRINFO(ahValue); // Insert Attribute Id with the value // NULL into ATTRINFO part. - return 0; + DBUG_RETURN(0); } else { /*********************************************************************** * Setting a NULL value on a NOT NULL attribute is not allowed. **********************************************************************/ setErrorCodeAbort(4203); - return -1; + DBUG_RETURN(-1); }//if }//if @@ -522,7 +530,7 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, const Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ; if (len != sizeInBytes && (len != 0)) { setErrorCodeAbort(4209); - return -1; + DBUG_RETURN(-1); }//if const Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Including bits in last word const Uint32 sizeInWords = sizeInBytes / 4; // Excluding bits in last word @@ -550,7 +558,7 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, tReturnCode = insertATTRINFOloop((Uint32*)aValue, sizeInWords); if (tReturnCode == -1) { - return tReturnCode; + DBUG_RETURN(tReturnCode); }//if if (bitsInLastWord != 0) { tData = *(Uint32*)(aValue + sizeInWords*4); @@ -559,11 +567,11 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo, tData = convertEndian(tData); tReturnCode = insertATTRINFO(tData); if (tReturnCode == -1) { - return tReturnCode; + DBUG_RETURN(tReturnCode); }//if }//if 
theErrorLine++; - return 0; + DBUG_RETURN(0); }//NdbOperation::setValue() NdbBlob* diff --git a/ndb/src/ndbapi/NdbOperationSearch.cpp b/ndb/src/ndbapi/NdbOperationSearch.cpp index 70850bcc66b..6e76287eef0 100644 --- a/ndb/src/ndbapi/NdbOperationSearch.cpp +++ b/ndb/src/ndbapi/NdbOperationSearch.cpp @@ -57,8 +57,16 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, const char* aValuePassed, Uint32 aVariableKeyLen) { - register Uint32 tAttrId; + DBUG_ENTER("NdbOperation::equal_impl"); + DBUG_PRINT("enter", ("col=%s op=%d val=0x%x len=%u", + tAttrInfo->m_name.c_str(), + theOperationType, + aValuePassed, aVariableKeyLen)); + if (aValuePassed != NULL) + DBUG_DUMP("value", (char*)aValuePassed, aVariableKeyLen); + register Uint32 tAttrId; + Uint32 tData; Uint32 tKeyInfoPosition; const char* aValue = aValuePassed; @@ -120,7 +128,9 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, theTupleKeyDefined[i][1] = tKeyInfoPosition; theTupleKeyDefined[i][2] = true; + OperationType tOpType = theOperationType; Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize; + { /************************************************************************ * Check if the pointer of the value passed is aligned on a 4 byte @@ -176,7 +186,6 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, }//if #endif - OperationType tOpType = theOperationType; /************************************************************************** * If the operation is an insert request and the attribute is stored then * we also set the value in the stored part through putting the @@ -231,7 +240,7 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, } else { theStatus = SetValue; }//if - return 0; + DBUG_RETURN(0); } else if ((tOpType == ReadRequest) || (tOpType == DeleteRequest) || (tOpType == ReadExclusive)) { theStatus = GetValue; @@ -242,42 +251,42 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, assert(c != 0); if (c->getBlobType()) { if (getBlobHandle(theNdbCon, 
c) == NULL) - return -1; + DBUG_RETURN(-1); } } } - return 0; + DBUG_RETURN(0); } else if ((tOpType == InsertRequest) || (tOpType == WriteRequest)) { theStatus = SetValue; - return 0; + DBUG_RETURN(0); } else { setErrorCodeAbort(4005); - return -1; + DBUG_RETURN(-1); }//if - return 0; + DBUG_RETURN(0); }//if } else { - return -1; + DBUG_RETURN(-1); }//if - return 0; + DBUG_RETURN(0); } if (aValue == NULL) { // NULL value in primary key setErrorCodeAbort(4505); - return -1; + DBUG_RETURN(-1); }//if if ( tAttrInfo == NULL ) { // Attribute name not found in table setErrorCodeAbort(4004); - return -1; + DBUG_RETURN(-1); }//if if (theStatus == GetValue || theStatus == SetValue){ // All pk's defined setErrorCodeAbort(4225); - return -1; + DBUG_RETURN(-1); }//if ndbout_c("theStatus: %d", theStatus); @@ -285,19 +294,19 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo, // If we come here, set a general errorcode // and exit setErrorCodeAbort(4200); - return -1; + DBUG_RETURN(-1); equal_error1: setErrorCodeAbort(4205); - return -1; + DBUG_RETURN(-1); equal_error2: setErrorCodeAbort(4206); - return -1; + DBUG_RETURN(-1); equal_error3: setErrorCodeAbort(4209); - return -1; + DBUG_RETURN(-1); } /****************************************************************************** diff --git a/ndb/src/ndbapi/NdbRecAttr.cpp b/ndb/src/ndbapi/NdbRecAttr.cpp index e6e97ab60b1..57f896e7e42 100644 --- a/ndb/src/ndbapi/NdbRecAttr.cpp +++ b/ndb/src/ndbapi/NdbRecAttr.cpp @@ -15,17 +15,6 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/************************************************************************************************ -Name: NdbRecAttr.C -Include: -Link: -Author: UABRONM Mikael Ronström UAB/B/SD -Date: 971206 -Version: 0.1 -Description: Interface between TIS and NDB -Documentation: -Adjust: 971206 UABRONM First version -************************************************************************************************/ #include #include #include 
@@ -148,6 +137,39 @@ NdbRecAttr::receive_data(const Uint32 * data, Uint32 sz){ return false; } +static void +ndbrecattr_print_string(NdbOut& out, const char *type, + const char *ref, unsigned sz) +{ + int i, len, printable= 1; + // trailing zeroes are not printed + for (i=sz-1; i >= 0; i--) + if (ref[i] == 0) sz--; + else break; + if (sz == 0) return; // empty + + for (len=0; len < (int)sz && ref[i] != 0; len++) + if (printable && !isprint((int)ref[i])) + printable= 0; + + if (printable) + out.print("%.*s", len, ref); + else + { + out.print("0x"); + for (i=0; i < len; i++) + out.print("%02X", (int)ref[i]); + } + if (len != (int)sz) + { + out.print("["); + for (i= len+1; ref[i] != 0; i++) + out.print("%u]",len-i); + assert((int)sz > i); + ndbrecattr_print_string(out,type,ref+i,sz-i); + } +} + NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r) { if (r.isNULL()) @@ -193,17 +215,21 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r) case NdbDictionary::Column::Tinyint: out << (int) r.char_value(); break; + case NdbDictionary::Column::Binary: + ndbrecattr_print_string(out,"Binary",r.aRef(),r.arraySize()); + j = r.arraySize(); + break; case NdbDictionary::Column::Char: - out.print("%.*s", r.arraySize(), r.aRef()); + ndbrecattr_print_string(out,"Char",r.aRef(),r.arraySize()); j = r.arraySize(); break; case NdbDictionary::Column::Varchar: - { - short len = ntohs(r.u_short_value()); - out.print("%.*s", len, r.aRef()+2); - } - j = r.arraySize(); - break; + { + unsigned len = *(const unsigned char*)r.aRef(); + ndbrecattr_print_string(out,"Varchar", r.aRef()+1,len); + j = r.arraySize(); + } + break; case NdbDictionary::Column::Float: out << r.float_value(); break; @@ -232,6 +258,13 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r) j = r.arraySize(); } break; + case NdbDictionary::Column::Longvarchar: + { + unsigned len = uint2korr(r.aRef()); + ndbrecattr_print_string(out,"Longvarchar", r.aRef()+2,len); + j = r.arraySize(); + } + break; default: /* no print 
functions for the rest, just print type */ out << (int) r.getType(); j = r.arraySize(); diff --git a/ndb/src/ndbapi/NdbTransaction.cpp b/ndb/src/ndbapi/NdbTransaction.cpp index d010c0ae0d8..f6664657f73 100644 --- a/ndb/src/ndbapi/NdbTransaction.cpp +++ b/ndb/src/ndbapi/NdbTransaction.cpp @@ -1092,7 +1092,11 @@ NdbTransaction::getNdbIndexScanOperation(const char* anIndexName, { NdbIndexImpl* index = theNdb->theDictionary->getIndex(anIndexName, aTableName); + if (index == 0) + return 0; NdbTableImpl* table = theNdb->theDictionary->getTable(aTableName); + if (table == 0) + return 0; return getNdbIndexScanOperation(index, table); } diff --git a/ndb/src/ndbapi/Ndberr.cpp b/ndb/src/ndbapi/Ndberr.cpp index 07f33d3e8b3..b05818de6f1 100644 --- a/ndb/src/ndbapi/Ndberr.cpp +++ b/ndb/src/ndbapi/Ndberr.cpp @@ -21,7 +21,7 @@ #include #include #include - +#include "NdbEventOperationImpl.hpp" static void update(const NdbError & _err){ @@ -73,3 +73,10 @@ NdbBlob::getNdbError() const { update(theError); return theError; } + +const +NdbError & +NdbEventOperationImpl::getNdbError() const { + update(m_error); + return m_error; +} diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index dd4a1cf0b9e..1e87e8481d0 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -55,6 +55,8 @@ typedef struct ErrorBundle { #define NI ndberror_cl_function_not_implemented #define UE ndberror_cl_unknown_error_code +#define OE ndberror_cl_schema_object_already_exists + static const char REDO_BUFFER_MSG[]= "REDO log buffers overloaded, consult online manual (increase RedoBuffer, and|or decrease TimeBetweenLocalCheckpoints, and|or increase NoOfFragmentLogFiles)"; @@ -79,6 +81,7 @@ static const char* empty_string = ""; * 4400 - "" * 4500 - "" * 4600 - "" + * 4700 - "" Event * 5000 - Management server */ @@ -296,6 +299,30 @@ ErrorBundle ErrorCodes[] = { { 4232, AE, "Parallelism can only be between 1 and 240" }, { 290, AE, "Scan not started or has been closed by kernel due to 
timeout" }, + /** + * Event schema errors + */ + + { 4713, SE, "Column defined in event does not exist in table"}, + + /** + * Event application errors + */ + + { 4707, AE, "Too many event have been defined"}, + { 4708, AE, "Event name is too long"}, + { 4709, AE, "Can't accept more subscribers"}, + { 746, OE, "Event name already exists"}, + { 4710, AE, "Event not found"}, + { 4711, AE, "Creation of event failed"}, + { 4712, AE, "Stopped event operation does not exist. Already stopped?"}, + + /** + * Event internal errors + */ + + { 4731, IE, "Event not found"}, + /** * SchemaError */ @@ -306,7 +333,7 @@ ErrorBundle ErrorCodes[] = { { 707, SE, "No more table metadata records" }, { 708, SE, "No more attribute metadata records" }, { 709, SE, "No such table existed" }, - { 721, SE, "Table or index with given name already exists" }, + { 721, OE, "Table or index with given name already exists" }, { 723, SE, "No such table existed" }, { 736, SE, "Unsupported array size" }, { 737, SE, "Attribute array size too big" }, @@ -466,7 +493,7 @@ ErrorBundle ErrorCodes[] = { { 4241, AE, "Index name too long" }, { 4242, AE, "Too many indexes" }, { 4243, AE, "Index not found" }, - { 4244, AE, "Index or table with given name already exists" }, + { 4244, OE, "Index or table with given name already exists" }, { 4245, AE, "Index attribute must be defined as stored, i.e. 
the StorageAttributeType must be defined as NormalStorageAttribute"}, { 4247, AE, "Illegal index/trigger create/drop/alter request" }, { 4248, AE, "Trigger/index name invalid" }, diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp index adaccebed6e..8a09a2ec9d1 100644 --- a/ndb/test/ndbapi/testOIBasic.cpp +++ b/ndb/test/ndbapi/testOIBasic.cpp @@ -28,7 +28,9 @@ #include #include #include +#include #include +#include // options @@ -72,7 +74,7 @@ struct Opt { m_die(0), m_dups(false), m_fragtype(NdbDictionary::Object::FragUndefined), - m_subsubloop(2), + m_subsubloop(4), m_index(0), m_loop(1), m_msglock(true), @@ -87,7 +89,7 @@ struct Opt { m_seed(-1), m_subloop(4), m_table(0), - m_threads(10), + m_threads(4), m_v(1) { } }; @@ -257,6 +259,7 @@ struct Par : public Opt { bool m_verify; // deadlock possible bool m_deadlock; + NdbOperation::LockMode m_lockmode; // ordered range scan bool m_ordered; bool m_descending; @@ -278,6 +281,7 @@ struct Par : public Opt { m_randomkey(false), m_verify(false), m_deadlock(false), + m_lockmode(NdbOperation::LM_Read), m_ordered(false), m_descending(false) { } @@ -598,7 +602,9 @@ getcs(Par par) struct Col { enum Type { Unsigned = NdbDictionary::Column::Unsigned, - Char = NdbDictionary::Column::Char + Char = NdbDictionary::Column::Char, + Varchar = NdbDictionary::Column::Varchar, + Longvarchar = NdbDictionary::Column::Longvarchar }; const class Tab& m_tab; unsigned m_num; @@ -612,7 +618,7 @@ struct Col { Col(const class Tab& tab, unsigned num, const char* name, bool pk, Type type, unsigned length, bool nullable, const Chs* chs); ~Col(); bool equal(const Col& col2) const; - void verify(const void* addr) const; + void wellformed(const void* addr) const; }; Col::Col(const class Tab& tab, unsigned num, const char* name, bool pk, Type type, unsigned length, bool nullable, const Chs* chs) : @@ -626,6 +632,9 @@ Col::Col(const class Tab& tab, unsigned num, const char* name, bool pk, Type typ m_nullable(nullable), 
m_chs(chs) { + // fix long varchar + if (type == Varchar && m_bytelength > 255) + m_type = Longvarchar; } Col::~Col() @@ -640,7 +649,7 @@ Col::equal(const Col& col2) const } void -Col::verify(const void* addr) const +Col::wellformed(const void* addr) const { switch (m_type) { case Col::Unsigned: @@ -653,6 +662,26 @@ Col::verify(const void* addr) const assert((*cs->cset->well_formed_len)(cs, src, src + len, 0xffff) == len); } break; + case Col::Varchar: + { + CHARSET_INFO* cs = m_chs->m_cs; + const unsigned char* src = (const unsigned char*)addr; + const char* ssrc = (const char*)src; + unsigned len = src[0]; + assert(len <= m_bytelength); + assert((*cs->cset->well_formed_len)(cs, ssrc + 1, ssrc + 1 + len, 0xffff) == len); + } + break; + case Col::Longvarchar: + { + CHARSET_INFO* cs = m_chs->m_cs; + const unsigned char* src = (const unsigned char*)addr; + const char* ssrc = (const char*)src; + unsigned len = src[0] + (src[1] << 8); + assert(len <= m_bytelength); + assert((*cs->cset->well_formed_len)(cs, ssrc + 2, ssrc + 2 + len, 0xffff) == len); + } + break; default: assert(false); break; @@ -673,6 +702,18 @@ operator<<(NdbOut& out, const Col& col) out << " char(" << col.m_length << "*" << cs->mbmaxlen << ";" << cs->name << ")"; } break; + case Col::Varchar: + { + CHARSET_INFO* cs = col.m_chs->m_cs; + out << " varchar(" << col.m_length << "*" << cs->mbmaxlen << ";" << cs->name << ")"; + } + break; + case Col::Longvarchar: + { + CHARSET_INFO* cs = col.m_chs->m_cs; + out << " longvarchar(" << col.m_length << "*" << cs->mbmaxlen << ";" << cs->name << ")"; + } + break; default: out << "type" << (int)col.m_type; assert(false); @@ -928,35 +969,27 @@ makebuiltintables(Par par) t->itabadd(2, x); } if (useindex(par, 3)) { - // d, c, b - ITab* x = new ITab(*t, "ti0x3", ITab::OrderedIndex, 3); - x->icoladd(0, new ICol(*x, 0, *t->m_col[3])); - x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); - x->icoladd(2, new ICol(*x, 2, *t->m_col[1])); - t->itabadd(3, x); - } - if 
(useindex(par, 4)) { // b, e, c, d - ITab* x = new ITab(*t, "ti0x4", ITab::OrderedIndex, 4); + ITab* x = new ITab(*t, "ti0x3", ITab::OrderedIndex, 4); x->icoladd(0, new ICol(*x, 0, *t->m_col[1])); x->icoladd(1, new ICol(*x, 1, *t->m_col[4])); x->icoladd(2, new ICol(*x, 2, *t->m_col[2])); x->icoladd(3, new ICol(*x, 3, *t->m_col[3])); + t->itabadd(3, x); + } + if (useindex(par, 4)) { + // a, c + ITab* x = new ITab(*t, "ti0z4", ITab::UniqueHashIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); t->itabadd(4, x); } if (useindex(par, 5)) { - // a, c + // a, e ITab* x = new ITab(*t, "ti0z5", ITab::UniqueHashIndex, 2); x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); - x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); - t->itabadd(5, x); - } - if (useindex(par, 6)) { - // a, e - ITab* x = new ITab(*t, "ti0z6", ITab::UniqueHashIndex, 2); - x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); x->icoladd(1, new ICol(*x, 1, *t->m_col[4])); - t->itabadd(6, x); + t->itabadd(5, x); } tablist[0] = t; } @@ -967,56 +1000,50 @@ makebuiltintables(Par par) t->coladd(0, new Col(*t, 0, "a", 0, Col::Unsigned, 1, 0, 0)); t->coladd(1, new Col(*t, 1, "b", 1, Col::Unsigned, 1, 0, 0)); t->coladd(2, new Col(*t, 2, "c", 0, Col::Char, 20, 1, getcs(par))); - t->coladd(3, new Col(*t, 3, "d", 0, Col::Char, 5, 0, getcs(par))); - t->coladd(4, new Col(*t, 4, "e", 0, Col::Char, 5, 1, getcs(par))); + t->coladd(3, new Col(*t, 3, "d", 0, Col::Varchar, 5, 0, getcs(par))); + t->coladd(4, new Col(*t, 4, "e", 0, Col::Longvarchar, 5, 1, getcs(par))); if (useindex(par, 0)) { // b ITab* x = new ITab(*t, "ti1x0", ITab::OrderedIndex, 1); x->icoladd(0, new ICol(*x, 0, *t->m_col[1])); + t->itabadd(0, x); } if (useindex(par, 1)) { - // a, c + // c, a ITab* x = new ITab(*t, "ti1x1", ITab::OrderedIndex, 2); - x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); - x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); + x->icoladd(0, new ICol(*x, 0, *t->m_col[2])); + x->icoladd(1, new ICol(*x, 
1, *t->m_col[0])); t->itabadd(1, x); } if (useindex(par, 2)) { - // c, a - ITab* x = new ITab(*t, "ti1x2", ITab::OrderedIndex, 2); - x->icoladd(0, new ICol(*x, 0, *t->m_col[2])); - x->icoladd(1, new ICol(*x, 1, *t->m_col[0])); + // d + ITab* x = new ITab(*t, "ti1x2", ITab::OrderedIndex, 1); + x->icoladd(0, new ICol(*x, 0, *t->m_col[3])); t->itabadd(2, x); } if (useindex(par, 3)) { - // e - ITab* x = new ITab(*t, "ti1x3", ITab::OrderedIndex, 1); - x->icoladd(0, new ICol(*x, 0, *t->m_col[4])); - t->itabadd(3, x); - } - if (useindex(par, 4)) { // e, d, c, b - ITab* x = new ITab(*t, "ti1x4", ITab::OrderedIndex, 4); + ITab* x = new ITab(*t, "ti1x3", ITab::OrderedIndex, 4); x->icoladd(0, new ICol(*x, 0, *t->m_col[4])); x->icoladd(1, new ICol(*x, 1, *t->m_col[3])); x->icoladd(2, new ICol(*x, 2, *t->m_col[2])); x->icoladd(3, new ICol(*x, 3, *t->m_col[1])); + t->itabadd(3, x); + } + if (useindex(par, 4)) { + // a, b + ITab* x = new ITab(*t, "ti1z4", ITab::UniqueHashIndex, 2); + x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[1])); t->itabadd(4, x); } if (useindex(par, 5)) { - // a, b - ITab* x = new ITab(*t, "ti1z5", ITab::UniqueHashIndex, 2); - x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); - x->icoladd(1, new ICol(*x, 1, *t->m_col[1])); - t->itabadd(5, x); - } - if (useindex(par, 6)) { // a, b, d - ITab* x = new ITab(*t, "ti1z6", ITab::UniqueHashIndex, 3); + ITab* x = new ITab(*t, "ti1z5", ITab::UniqueHashIndex, 3); x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); x->icoladd(1, new ICol(*x, 1, *t->m_col[1])); x->icoladd(2, new ICol(*x, 2, *t->m_col[3])); - t->itabadd(6, x); + t->itabadd(5, x); } tablist[1] = t; } @@ -1027,8 +1054,8 @@ makebuiltintables(Par par) t->coladd(0, new Col(*t, 0, "a", 1, Col::Char, 31, 0, getcs(par))); t->coladd(1, new Col(*t, 1, "b", 0, Col::Char, 4, 1, getcs(par))); t->coladd(2, new Col(*t, 2, "c", 1, Col::Unsigned, 1, 0, 0)); - t->coladd(3, new Col(*t, 3, "d", 1, Col::Char, 3, 0, getcs(par))); - t->coladd(4, 
new Col(*t, 4, "e", 0, Col::Char, 17, 0, getcs(par))); + t->coladd(3, new Col(*t, 3, "d", 1, Col::Varchar, 128, 0, getcs(par))); + t->coladd(4, new Col(*t, 4, "e", 0, Col::Varchar, 7, 0, getcs(par))); if (useindex(par, 0)) { // a, c, d ITab* x = new ITab(*t, "ti2x0", ITab::OrderedIndex, 3); @@ -1060,27 +1087,20 @@ makebuiltintables(Par par) t->itabadd(3, x); } if (useindex(par, 4)) { - // a, e - ITab* x = new ITab(*t, "ti2x4", ITab::OrderedIndex, 2); + // a, c + ITab* x = new ITab(*t, "ti2z4", ITab::UniqueHashIndex, 2); x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); - x->icoladd(1, new ICol(*x, 1, *t->m_col[4])); + x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); t->itabadd(4, x); } if (useindex(par, 5)) { - // a, c - ITab* x = new ITab(*t, "ti2z5", ITab::UniqueHashIndex, 2); - x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); - x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); - t->itabadd(5, x); - } - if (useindex(par, 6)) { // a, c, d, e - ITab* x = new ITab(*t, "ti2z6", ITab::UniqueHashIndex, 4); + ITab* x = new ITab(*t, "ti2z5", ITab::UniqueHashIndex, 4); x->icoladd(0, new ICol(*x, 0, *t->m_col[0])); x->icoladd(1, new ICol(*x, 1, *t->m_col[2])); x->icoladd(2, new ICol(*x, 2, *t->m_col[3])); x->icoladd(3, new ICol(*x, 3, *t->m_col[4])); - t->itabadd(6, x); + t->itabadd(5, x); } tablist[2] = t; } @@ -1115,19 +1135,19 @@ struct Con { void disconnect(); int startTransaction(); int getNdbOperation(const Tab& tab); + int getNdbIndexOperation1(const ITab& itab, const Tab& tab); int getNdbIndexOperation(const ITab& itab, const Tab& tab); int getNdbScanOperation(const Tab& tab); - int getNdbScanOperation(const ITab& itab, const Tab& tab); + int getNdbIndexScanOperation1(const ITab& itab, const Tab& tab); + int getNdbIndexScanOperation(const ITab& itab, const Tab& tab); int equal(int num, const char* addr); int getValue(int num, NdbRecAttr*& rec); int setValue(int num, const char* addr); int setBound(int num, int type, const void* value); int execute(ExecType t); int 
execute(ExecType t, bool& deadlock); - int openScanRead(unsigned scanbat, unsigned scanpar); - int openScanExclusive(unsigned scanbat, unsigned scanpar); - int openScanOrdered(unsigned scanbat, unsigned scanpar, bool descending); - int openScanOrderedExclusive(unsigned scanbat, unsigned scanpar, bool descending); + int readTuples(Par par); + int readIndexTuples(Par par); int executeScan(); int nextScanResult(bool fetchAllowed); int nextScanResult(bool fetchAllowed, bool& deadlock); @@ -1182,13 +1202,27 @@ Con::getNdbOperation(const Tab& tab) } int -Con::getNdbIndexOperation(const ITab& itab, const Tab& tab) +Con::getNdbIndexOperation1(const ITab& itab, const Tab& tab) { assert(m_tx != 0); CHKCON((m_op = m_indexop = m_tx->getNdbIndexOperation(itab.m_name, tab.m_name)) != 0, *this); return 0; } +int +Con::getNdbIndexOperation(const ITab& itab, const Tab& tab) +{ + assert(m_tx != 0); + unsigned tries = 0; + while (1) { + if (getNdbIndexOperation1(itab, tab) == 0) + break; + CHK(++tries < 10); + NdbSleep_MilliSleep(100); + } + return 0; +} + int Con::getNdbScanOperation(const Tab& tab) { @@ -1198,13 +1232,27 @@ Con::getNdbScanOperation(const Tab& tab) } int -Con::getNdbScanOperation(const ITab& itab, const Tab& tab) +Con::getNdbIndexScanOperation1(const ITab& itab, const Tab& tab) { assert(m_tx != 0); CHKCON((m_op = m_scanop = m_indexscanop = m_tx->getNdbIndexScanOperation(itab.m_name, tab.m_name)) != 0, *this); return 0; } +int +Con::getNdbIndexScanOperation(const ITab& itab, const Tab& tab) +{ + assert(m_tx != 0); + unsigned tries = 0; + while (1) { + if (getNdbIndexScanOperation1(itab, tab) == 0) + break; + CHK(++tries < 10); + NdbSleep_MilliSleep(100); + } + return 0; +} + int Con::equal(int num, const char* addr) { @@ -1262,42 +1310,21 @@ Con::execute(ExecType t, bool& deadlock) } int -Con::openScanRead(unsigned scanbat, unsigned scanpar) +Con::readTuples(Par par) { assert(m_tx != 0 && m_scanop != 0); - NdbOperation::LockMode lm = NdbOperation::LM_Read; - 
CHKCON(m_scanop->readTuples(lm, scanbat, scanpar) == 0, *this); + CHKCON(m_scanop->readTuples(par.m_lockmode, par.m_scanbat, par.m_scanpar) == 0, *this); return 0; } int -Con::openScanExclusive(unsigned scanbat, unsigned scanpar) -{ - assert(m_tx != 0 && m_scanop != 0); - NdbOperation::LockMode lm = NdbOperation::LM_Exclusive; - CHKCON(m_scanop->readTuples(lm, scanbat, scanpar) == 0, *this); - return 0; -} - -int -Con::openScanOrdered(unsigned scanbat, unsigned scanpar, bool descending) +Con::readIndexTuples(Par par) { assert(m_tx != 0 && m_indexscanop != 0); - NdbOperation::LockMode lm = NdbOperation::LM_Read; - CHKCON(m_indexscanop->readTuples(lm, scanbat, scanpar, true, descending) == 0, *this); + CHKCON(m_indexscanop->readTuples(par.m_lockmode, par.m_scanbat, par.m_scanpar, par.m_ordered, par.m_descending) == 0, *this); return 0; } -int -Con::openScanOrderedExclusive(unsigned scanbat, unsigned scanpar, bool descending) -{ - assert(m_tx != 0 && m_indexscanop != 0); - NdbOperation::LockMode lm = NdbOperation::LM_Exclusive; - CHKCON(m_indexscanop->readTuples(lm, scanbat, scanpar, true, descending) == 0, *this); - return 0; -} - - int Con::executeScan() { @@ -1564,6 +1591,8 @@ struct Val { union { Uint32 m_uint32; unsigned char* m_char; + unsigned char* m_varchar; + unsigned char* m_longvarchar; }; Val(const Col& col); ~Val(); @@ -1576,9 +1605,12 @@ struct Val { int setval(Par par) const; void calc(Par par, unsigned i); void calckey(Par par, unsigned i); + void calckeychars(Par par, unsigned i, unsigned& n, unsigned char* buf); void calcnokey(Par par); - int verify(const Val& val2) const; - int cmp(const Val& val2) const; + void calcnokeychars(Par par, unsigned& n, unsigned char* buf); + int verify(Par par, const Val& val2) const; + int cmp(Par par, const Val& val2) const; + int cmpchars(Par par, const unsigned char* buf1, unsigned len1, const unsigned char* buf2, unsigned len2) const; private: Val& operator=(const Val& val2); }; @@ -1595,6 +1627,12 @@ 
Val::Val(const Col& col) : case Col::Char: m_char = new unsigned char [col.m_bytelength]; break; + case Col::Varchar: + m_varchar = new unsigned char [1 + col.m_bytelength]; + break; + case Col::Longvarchar: + m_longvarchar = new unsigned char [2 + col.m_bytelength]; + break; default: assert(false); break; @@ -1610,6 +1648,12 @@ Val::~Val() case Col::Char: delete [] m_char; break; + case Col::Varchar: + delete [] m_varchar; + break; + case Col::Longvarchar: + delete [] m_longvarchar; + break; default: assert(false); break; @@ -1640,6 +1684,12 @@ Val::copy(const void* addr) case Col::Char: memcpy(m_char, addr, col.m_bytelength); break; + case Col::Varchar: + memcpy(m_varchar, addr, 1 + col.m_bytelength); + break; + case Col::Longvarchar: + memcpy(m_longvarchar, addr, 2 + col.m_bytelength); + break; default: assert(false); break; @@ -1656,6 +1706,10 @@ Val::dataaddr() const return &m_uint32; case Col::Char: return m_char; + case Col::Varchar: + return m_varchar; + case Col::Longvarchar: + return m_longvarchar; default: break; } @@ -1704,7 +1758,7 @@ Val::calc(Par par, unsigned i) const Col& col = m_col; col.m_pk ? calckey(par, i) : calcnokey(par); if (! 
m_null) - col.verify(dataaddr()); + col.wellformed(dataaddr()); } void @@ -1721,25 +1775,54 @@ Val::calckey(Par par, unsigned i) const Chs* chs = col.m_chs; CHARSET_INFO* cs = chs->m_cs; unsigned n = 0; - // our random chars may not fill value exactly - while (n + cs->mbmaxlen <= col.m_bytelength) { - if (i % (1 + n) == 0) { - break; - } - const Chr& chr = chs->m_chr[i % maxcharcount]; - memcpy(&m_char[n], chr.m_bytes, chr.m_size); - n += chr.m_size; - } - // this will extend by appropriate space + calckeychars(par, i, n, m_char); + // extend by appropriate space (*cs->cset->fill)(cs, (char*)&m_char[n], col.m_bytelength - n, 0x20); } break; + case Col::Varchar: + { + unsigned n = 0; + calckeychars(par, i, n, m_varchar + 1); + // set length and pad with nulls + m_varchar[0] = n; + memset(&m_varchar[1 + n], 0, col.m_bytelength - n); + } + break; + case Col::Longvarchar: + { + unsigned n = 0; + calckeychars(par, i, n, m_longvarchar + 2); + // set length and pad with nulls + m_longvarchar[0] = (n & 0xff); + m_longvarchar[1] = (n >> 8); + memset(&m_longvarchar[2 + n], 0, col.m_bytelength - n); + } + break; default: assert(false); break; } } +void +Val::calckeychars(Par par, unsigned i, unsigned& n, unsigned char* buf) +{ + const Col& col = m_col; + const Chs* chs = col.m_chs; + CHARSET_INFO* cs = chs->m_cs; + n = 0; + // our random chars may not fill value exactly + while (n + cs->mbmaxlen <= col.m_bytelength) { + if (i % (1 + n) == 0) { + break; + } + const Chr& chr = chs->m_chr[i % maxcharcount]; + memcpy(buf + n, chr.m_bytes, chr.m_size); + n += chr.m_size; + } +} + void Val::calcnokey(Par par) { @@ -1764,42 +1847,71 @@ Val::calcnokey(Par par) const Chs* chs = col.m_chs; CHARSET_INFO* cs = chs->m_cs; unsigned n = 0; - // our random chars may not fill value exactly - while (n + cs->mbmaxlen <= col.m_bytelength) { - if (urandom(1 + col.m_bytelength) == 0) { - break; - } - unsigned half = maxcharcount / 2; - int r = irandom((par.m_pctrange * half) / 100); - if 
(par.m_bdir != 0 && urandom(10) != 0) { - if (r < 0 && par.m_bdir > 0 || r > 0 && par.m_bdir < 0) - r = -r; - } - unsigned i = half + r; - assert(i < maxcharcount); - const Chr& chr = chs->m_chr[i]; - memcpy(&m_char[n], chr.m_bytes, chr.m_size); - n += chr.m_size; - } - // this will extend by appropriate space + calcnokeychars(par, n, m_char); + // extend by appropriate space (*cs->cset->fill)(cs, (char*)&m_char[n], col.m_bytelength - n, 0x20); } break; + case Col::Varchar: + { + unsigned n = 0; + calcnokeychars(par, n, m_varchar + 1); + // set length and pad with nulls + m_varchar[0] = n; + memset(&m_varchar[1 + n], 0, col.m_bytelength - n); + } + break; + case Col::Longvarchar: + { + unsigned n = 0; + calcnokeychars(par, n, m_longvarchar + 2); + // set length and pad with nulls + m_longvarchar[0] = (n & 0xff); + m_longvarchar[1] = (n >> 8); + memset(&m_longvarchar[2 + n], 0, col.m_bytelength - n); + } + break; default: assert(false); break; } } -int -Val::verify(const Val& val2) const +void +Val::calcnokeychars(Par par, unsigned& n, unsigned char* buf) { - CHK(cmp(val2) == 0); + const Col& col = m_col; + const Chs* chs = col.m_chs; + CHARSET_INFO* cs = chs->m_cs; + n = 0; + // our random chars may not fill value exactly + while (n + cs->mbmaxlen <= col.m_bytelength) { + if (urandom(1 + col.m_bytelength) == 0) { + break; + } + unsigned half = maxcharcount / 2; + int r = irandom((par.m_pctrange * half) / 100); + if (par.m_bdir != 0 && urandom(10) != 0) { + if (r < 0 && par.m_bdir > 0 || r > 0 && par.m_bdir < 0) + r = -r; + } + unsigned i = half + r; + assert(i < maxcharcount); + const Chr& chr = chs->m_chr[i]; + memcpy(buf + n, chr.m_bytes, chr.m_size); + n += chr.m_size; + } +} + +int +Val::verify(Par par, const Val& val2) const +{ + CHK(cmp(par, val2) == 0); return 0; } int -Val::cmp(const Val& val2) const +Val::cmp(Par par, const Val& val2) const { const Col& col = m_col; const Col& col2 = val2.m_col; @@ -1812,8 +1924,8 @@ Val::cmp(const Val& val2) const return 
0; } // verify data formats - col.verify(dataaddr()); - col.verify(val2.dataaddr()); + col.wellformed(dataaddr()); + col.wellformed(val2.dataaddr()); // compare switch (col.m_type) { case Col::Unsigned: @@ -1827,25 +1939,22 @@ Val::cmp(const Val& val2) const break; case Col::Char: { - const Chs* chs = col.m_chs; - CHARSET_INFO* cs = chs->m_cs; unsigned len = col.m_bytelength; - int k; - if (! g_opt.m_collsp) { - unsigned char x1[maxxmulsize * 8000]; - unsigned char x2[maxxmulsize * 8000]; - int n1 = (*cs->coll->strnxfrm)(cs, x1, chs->m_xmul * len, m_char, len); - int n2 = (*cs->coll->strnxfrm)(cs, x2, chs->m_xmul * len, val2.m_char, len); - // currently same but do not assume it - unsigned n = (n1 > n2 ? n1 : n2); - // assume null padding - memset(x1 + n1, 0x0, n - n1); - memset(x2 + n2, 0x0, n - n2); - k = memcmp(x1, x2, n); - } else { - k = (*cs->coll->strnncollsp)(cs, m_char, len, val2.m_char, len, false); - } - return k < 0 ? -1 : k > 0 ? +1 : 0; + return cmpchars(par, m_char, len, val2.m_char, len); + } + break; + case Col::Varchar: + { + unsigned len1 = m_varchar[0]; + unsigned len2 = val2.m_varchar[0]; + return cmpchars(par, m_varchar + 1, len1, val2.m_varchar + 1, len2); + } + break; + case Col::Longvarchar: + { + unsigned len1 = m_longvarchar[0] + (m_longvarchar[1] << 8); + unsigned len2 = val2.m_longvarchar[0] + (val2.m_longvarchar[1] << 8); + return cmpchars(par, m_longvarchar + 2, len1, val2.m_longvarchar + 2, len2); } break; default: @@ -1855,6 +1964,56 @@ Val::cmp(const Val& val2) const return 0; } +int +Val::cmpchars(Par par, const unsigned char* buf1, unsigned len1, const unsigned char* buf2, unsigned len2) const +{ + const Col& col = m_col; + const Chs* chs = col.m_chs; + CHARSET_INFO* cs = chs->m_cs; + int k; + if (! 
par.m_collsp) { + unsigned char x1[maxxmulsize * 8000]; + unsigned char x2[maxxmulsize * 8000]; + // make strxfrm pad both to same length + unsigned len = maxxmulsize * col.m_bytelength; + int n1 = NdbSqlUtil::strnxfrm_bug7284(cs, x1, chs->m_xmul * len, buf1, len1); + int n2 = NdbSqlUtil::strnxfrm_bug7284(cs, x2, chs->m_xmul * len, buf2, len2); + assert(n1 == n2); + k = memcmp(x1, x2, n1); + } else { + k = (*cs->coll->strnncollsp)(cs, buf1, len1, buf2, len2, false); + } + return k < 0 ? -1 : k > 0 ? +1 : 0; +} + +static void +printstring(NdbOut& out, const unsigned char* str, unsigned len, bool showlen) +{ + char buf[4 * 8000]; + char *p = buf; + *p++ = '['; + if (showlen) { + sprintf(p, "%u:", len); + p += strlen(p); + } + for (unsigned i = 0; i < len; i++) { + unsigned char c = str[i]; + if (c == '\\') { + *p++ = '\\'; + *p++ = c; + } else if (0x20 <= c && c < 0x7e) { + *p++ = c; + } else { + *p++ = '\\'; + *p++ = hexstr[c >> 4]; + *p++ = hexstr[c & 15]; + } + } + *p++ = ']'; + *p = 0; + out << buf; +} + static NdbOut& operator<<(NdbOut& out, const Val& val) { @@ -1869,25 +2028,20 @@ operator<<(NdbOut& out, const Val& val) break; case Col::Char: { - char buf[4 * 8000]; - char *p = buf; - *p++ = '['; - for (unsigned i = 0; i < col.m_bytelength; i++) { - unsigned char c = val.m_char[i]; - if (c == '\\') { - *p++ = '\\'; - *p++ = '\\'; - } else if (0x20 <= c && c < 0x7e) { - *p++ = c; - } else { - *p++ = '\\'; - *p++ = hexstr[c >> 4]; - *p++ = hexstr[c & 15]; - } - } - *p++ = ']'; - *p = 0; - out << buf; + unsigned len = col.m_bytelength; + printstring(out, val.m_char, len, false); + } + break; + case Col::Varchar: + { + unsigned len = val.m_varchar[0]; + printstring(out, val.m_varchar + 1, len, true); + } + break; + case Col::Longvarchar: + { + unsigned len = val.m_longvarchar[0] + (val.m_longvarchar[1] << 8); + printstring(out, val.m_longvarchar + 2, len, true); } break; default: @@ -1912,7 +2066,7 @@ struct Row { void copy(const Row& row2); void calc(Par par, 
unsigned i, unsigned mask = 0); const Row& dbrow() const; - int verify(const Row& row2) const; + int verify(Par par, const Row& row2) const; int insrow(Par par); int updrow(Par par); int updrow(Par par, const ITab& itab); @@ -1921,8 +2075,8 @@ struct Row { int selrow(Par par); int selrow(Par par, const ITab& itab); int setrow(Par par); - int cmp(const Row& row2) const; - int cmp(const Row& row2, const ITab& itab) const; + int cmp(Par par, const Row& row2) const; + int cmp(Par par, const Row& row2, const ITab& itab) const; private: Row& operator=(const Row& row2); }; @@ -1994,7 +2148,7 @@ Row::dbrow() const } int -Row::verify(const Row& row2) const +Row::verify(Par par, const Row& row2) const { const Tab& tab = m_tab; const Row& row1 = *this; @@ -2002,7 +2156,7 @@ Row::verify(const Row& row2) const for (unsigned k = 0; k < tab.m_cols; k++) { const Val& val1 = *row1.m_val[k]; const Val& val2 = *row2.m_val[k]; - CHK(val1.verify(val2) == 0); + CHK(val1.verify(par, val2) == 0); } return 0; } @@ -2169,7 +2323,7 @@ Row::setrow(Par par) } int -Row::cmp(const Row& row2) const +Row::cmp(Par par, const Row& row2) const { const Tab& tab = m_tab; assert(&tab == &row2.m_tab); @@ -2177,14 +2331,14 @@ Row::cmp(const Row& row2) const for (unsigned k = 0; k < tab.m_cols; k++) { const Val& val = *m_val[k]; const Val& val2 = *row2.m_val[k]; - if ((c = val.cmp(val2)) != 0) + if ((c = val.cmp(par, val2)) != 0) break; } return c; } int -Row::cmp(const Row& row2, const ITab& itab) const +Row::cmp(Par par, const Row& row2, const ITab& itab) const { const Tab& tab = m_tab; int c = 0; @@ -2195,7 +2349,7 @@ Row::cmp(const Row& row2, const ITab& itab) const assert(k < tab.m_cols); const Val& val = *m_val[k]; const Val& val2 = *row2.m_val[k]; - if ((c = val.cmp(val2)) != 0) + if ((c = val.cmp(par, val2)) != 0) break; } return c; @@ -2283,8 +2437,8 @@ struct Set { int getkey(Par par, unsigned* i); int putval(unsigned i, bool force, unsigned n = ~0); // verify - int verify(const Set& set2) const; 
- int verifyorder(const ITab& itab, bool descending) const; + int verify(Par par, const Set& set2) const; + int verifyorder(Par par, const ITab& itab, bool descending) const; // protect structure NdbMutex* m_mutex; void lock() const { @@ -2616,7 +2770,7 @@ Set::putval(unsigned i, bool force, unsigned n) // verify int -Set::verify(const Set& set2) const +Set::verify(Par par, const Set& set2) const { assert(&m_tab == &set2.m_tab && m_rows == set2.m_rows); LL4("verify set1 count=" << count() << " vs set2 count=" << set2.count()); @@ -2625,7 +2779,7 @@ Set::verify(const Set& set2) const if (exist(i) != set2.exist(i)) { ok = false; } else if (exist(i)) { - if (dbrow(i).verify(set2.dbrow(i)) != 0) + if (dbrow(i).verify(par, set2.dbrow(i)) != 0) ok = false; } if (! ok) { @@ -2637,7 +2791,7 @@ Set::verify(const Set& set2) const } int -Set::verifyorder(const ITab& itab, bool descending) const +Set::verifyorder(Par par, const ITab& itab, bool descending) const { const Tab& tab = m_tab; for (unsigned n = 0; n < m_rows; n++) { @@ -2652,9 +2806,9 @@ Set::verifyorder(const ITab& itab, bool descending) const const Row& row2 = *m_row[i2]; assert(row1.m_exist && row2.m_exist); if (! 
descending) - CHK(row1.cmp(row2, itab) <= 0); + CHK(row1.cmp(par, row2, itab) <= 0); else - CHK(row1.cmp(row2, itab) >= 0); + CHK(row1.cmp(par, row2, itab) >= 0); } return 0; } @@ -2724,7 +2878,7 @@ struct BSet { void calc(Par par); void calcpk(Par par, unsigned i); int setbnd(Par par) const; - void filter(const Set& set, Set& set2) const; + void filter(Par par, const Set& set, Set& set2) const; }; BSet::BSet(const Tab& tab, const ITab& itab, unsigned rows) : @@ -2797,7 +2951,7 @@ BSet::calc(Par par) assert(m_bvals >= 2); const BVal& bv1 = *m_bval[m_bvals - 2]; const BVal& bv2 = *m_bval[m_bvals - 1]; - if (bv1.cmp(bv2) > 0 && urandom(100) != 0) + if (bv1.cmp(par, bv2) > 0 && urandom(100) != 0) continue; } } while (0); @@ -2848,7 +3002,7 @@ BSet::setbnd(Par par) const } void -BSet::filter(const Set& set, Set& set2) const +BSet::filter(Par par, const Set& set, Set& set2) const { const Tab& tab = m_tab; const ITab& itab = m_itab; @@ -2880,7 +3034,7 @@ BSet::filter(const Set& set, Set& set2) const const ICol& icol = bval.m_icol; const Col& col = icol.m_col; const Val& val = *row.m_val[col.m_num]; - int ret = bval.cmp(val); + int ret = bval.cmp(par, val); LL5("cmp: ret=" << ret << " " << bval << " vs " << val); if (bval.m_type == 0) ok2 = (ret <= 0); @@ -3110,7 +3264,7 @@ pkread(Par par) con.closeTransaction(); } if (par.m_verify) - CHK(set1.verify(set2) == 0); + CHK(set1.verify(par, set2) == 0); return 0; } @@ -3275,7 +3429,7 @@ hashindexread(Par par, const ITab& itab) con.closeTransaction(); } if (par.m_verify) - CHK(set1.verify(set2) == 0); + CHK(set1.verify(par, set2) == 0); return 0; } @@ -3289,12 +3443,11 @@ scanreadtable(Par par) const Set& set = par.set(); // expected const Set& set1 = set; - LL3("scanread " << tab.m_name << " verify=" << par.m_verify); - LL4("expect " << set.count() << " rows"); + LL3("scanread " << tab.m_name << " lockmode=" << par.m_lockmode << " expect=" << set1.count() << " verify=" << par.m_verify); Set set2(tab, set.m_rows); 
CHK(con.startTransaction() == 0); CHK(con.getNdbScanOperation(tab) == 0); - CHK(con.openScanRead(par.m_scanbat, par.m_scanpar) == 0); + CHK(con.readTuples(par) == 0); set2.getval(par); CHK(con.executeScan() == 0); unsigned n = 0; @@ -3317,7 +3470,7 @@ scanreadtable(Par par) } con.closeTransaction(); if (par.m_verify) - CHK(set1.verify(set2) == 0); + CHK(set1.verify(par, set2) == 0); LL3("scanread " << tab.m_name << " done rows=" << n); return 0; } @@ -3331,7 +3484,7 @@ scanreadtablefast(Par par, unsigned countcheck) LL3("scanfast " << tab.m_name); CHK(con.startTransaction() == 0); CHK(con.getNdbScanOperation(tab) == 0); - CHK(con.openScanRead(par.m_scanbat, par.m_scanpar) == 0); + CHK(con.readTuples(par) == 0); // get 1st column NdbRecAttr* rec; CHK(con.getValue((Uint32)0, rec) == 0); @@ -3355,12 +3508,11 @@ scanreadindex(Par par, const ITab& itab, BSet& bset, bool calc) Con& con = par.con(); const Tab& tab = par.tab(); const Set& set = par.set(); - LL4(bset); Set set1(tab, set.m_rows); if (calc) { while (true) { bset.calc(par); - bset.filter(set, set1); + bset.filter(par, set, set1); unsigned n = set1.count(); // prefer proper subset if (0 < n && n < set.m_rows) @@ -3370,17 +3522,13 @@ scanreadindex(Par par, const ITab& itab, BSet& bset, bool calc) set1.reset(); } } else { - bset.filter(set, set1); + bset.filter(par, set, set1); } - LL3("scanread " << itab.m_name << " bounds=" << bset << " verify=" << par.m_verify << " ordered=" << par.m_ordered << " descending=" << par.m_descending); - LL4("expect " << set1.count() << " rows"); + LL3("scanread " << itab.m_name << " " << bset << " lockmode=" << par.m_lockmode << " expect=" << set1.count() << " verify=" << par.m_verify << " ordered=" << par.m_ordered << " descending=" << par.m_descending); Set set2(tab, set.m_rows); CHK(con.startTransaction() == 0); - CHK(con.getNdbScanOperation(itab, tab) == 0); - if (! 
par.m_ordered) - CHK(con.openScanRead(par.m_scanbat, par.m_scanpar) == 0); - else - CHK(con.openScanOrdered(par.m_scanbat, par.m_scanpar, par.m_descending) == 0); + CHK(con.getNdbIndexScanOperation(itab, tab) == 0); + CHK(con.readIndexTuples(par) == 0); CHK(bset.setbnd(par) == 0); set2.getval(par); CHK(con.executeScan() == 0); @@ -3404,9 +3552,9 @@ scanreadindex(Par par, const ITab& itab, BSet& bset, bool calc) } con.closeTransaction(); if (par.m_verify) { - CHK(set1.verify(set2) == 0); + CHK(set1.verify(par, set2) == 0); if (par.m_ordered) - CHK(set2.verifyorder(itab, par.m_descending) == 0); + CHK(set2.verifyorder(par, itab, par.m_descending) == 0); } LL3("scanread " << itab.m_name << " done rows=" << n); return 0; @@ -3418,11 +3566,11 @@ scanreadindexfast(Par par, const ITab& itab, const BSet& bset, unsigned countche Con& con = par.con(); const Tab& tab = par.tab(); const Set& set = par.set(); - LL3("scanfast " << itab.m_name << " bounds=" << bset.m_bvals); + LL3("scanfast " << itab.m_name << " " << bset); LL4(bset); CHK(con.startTransaction() == 0); - CHK(con.getNdbScanOperation(itab, tab) == 0); - CHK(con.openScanRead(par.m_scanbat, par.m_scanpar) == 0); + CHK(con.getNdbIndexScanOperation(itab, tab) == 0); + CHK(con.readIndexTuples(par) == 0); CHK(bset.setbnd(par) == 0); // get 1st column NdbRecAttr* rec; @@ -3544,9 +3692,10 @@ scanupdatetable(Par par) Set& set = par.set(); LL3("scan update " << tab.m_name); Set set2(tab, set.m_rows); + par.m_lockmode = NdbOperation::LM_Exclusive; CHK(con.startTransaction() == 0); CHK(con.getNdbScanOperation(tab) == 0); - CHK(con.openScanExclusive(par.m_scanbat, par.m_scanpar) == 0); + CHK(con.readTuples(par) == 0); set2.getval(par); CHK(con.executeScan() == 0); unsigned count = 0; @@ -3641,12 +3790,10 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset) Set& set = par.set(); LL3("scan update " << itab.m_name); Set set2(tab, set.m_rows); + par.m_lockmode = NdbOperation::LM_Exclusive; CHK(con.startTransaction() == 
0); - CHK(con.getNdbScanOperation(itab, tab) == 0); - if (! par.m_ordered) - CHK(con.openScanExclusive(par.m_scanbat, par.m_scanpar) == 0); - else - CHK(con.openScanOrderedExclusive(par.m_scanbat, par.m_scanpar, par.m_descending) == 0); + CHK(con.getNdbIndexScanOperation(itab, tab) == 0); + CHK(con.readTuples(par) == 0); CHK(bset.setbnd(par) == 0); set2.getval(par); CHK(con.executeScan() == 0); @@ -3777,6 +3924,7 @@ readverify(Par par) if (par.m_noverify) return 0; par.m_verify = true; + par.m_lockmode = NdbOperation::LM_CommittedRead; CHK(pkread(par) == 0); CHK(scanreadall(par) == 0); return 0; @@ -3788,19 +3936,24 @@ readverifyfull(Par par) if (par.m_noverify) return 0; par.m_verify = true; - if (par.m_no == 0) + par.m_lockmode = NdbOperation::LM_CommittedRead; + const Tab& tab = par.tab(); + if (par.m_no == 0) { + // thread 0 scans table CHK(scanreadtable(par) == 0); - else { - const Tab& tab = par.tab(); - unsigned i = par.m_no - 1; - if (i < tab.m_itabs && tab.m_itab[i] != 0) { - const ITab& itab = *tab.m_itab[i]; - if (itab.m_type == ITab::OrderedIndex) { - BSet bset(tab, itab, par.m_rows); - CHK(scanreadindex(par, itab, bset, false) == 0); - } else { - CHK(hashindexread(par, itab) == 0); - } + } + // each thread scans different indexes + for (unsigned i = 0; i < tab.m_itabs; i++) { + if (i % par.m_threads != par.m_no) + continue; + if (tab.m_itab[i] == 0) + continue; + const ITab& itab = *tab.m_itab[i]; + if (itab.m_type == ITab::OrderedIndex) { + BSet bset(tab, itab, par.m_rows); + CHK(scanreadindex(par, itab, bset, false) == 0); + } else { + CHK(hashindexread(par, itab) == 0); } } return 0; @@ -3809,7 +3962,10 @@ readverifyfull(Par par) static int readverifyindex(Par par) { + if (par.m_noverify) + return 0; par.m_verify = true; + par.m_lockmode = NdbOperation::LM_CommittedRead; unsigned sel = urandom(10); if (sel < 9) { par.m_ordered = true; @@ -4411,15 +4567,30 @@ printtables() { Par par(g_opt); makebuiltintables(par); - ndbout << "builtin tables (x0 on 
pk, x=ordered z=hash):" << endl; + ndbout << "tables and indexes (x=ordered z=hash x0=on pk):" << endl; for (unsigned j = 0; j < tabcount; j++) { if (tablist[j] == 0) continue; const Tab& tab = *tablist[j]; - ndbout << " " << tab.m_name; + const char* tname = tab.m_name; + ndbout << " " << tname; for (unsigned i = 0; i < tab.m_itabs; i++) { + if (tab.m_itab[i] == 0) + continue; const ITab& itab = *tab.m_itab[i]; - ndbout << " " << itab.m_name; + const char* iname = itab.m_name; + if (strncmp(tname, iname, strlen(tname)) == 0) + iname += strlen(tname); + ndbout << " " << iname; + ndbout << "("; + for (unsigned k = 0; k < itab.m_icols; k++) { + if (k != 0) + ndbout << ","; + const ICol& icol = *itab.m_icol[k]; + const Col& col = icol.m_col; + ndbout << col.m_name; + } + ndbout << ")"; } ndbout << endl; } @@ -4434,9 +4605,12 @@ runtest(Par par) unsigned short seed = (getpid() ^ time(0)); LL1("random seed: " << seed); srandom((unsigned)seed); - } else if (par.m_seed != 0) + } else if (par.m_seed != 0) { LL1("random seed: " << par.m_seed); srandom(par.m_seed); + } else { + LL1("random seed: loop number"); + } // cs assert(par.m_csname != 0); if (strcmp(par.m_csname, "random") != 0) { diff --git a/ndb/test/ndbapi/test_event.cpp b/ndb/test/ndbapi/test_event.cpp index cb2793e42b9..567cd1581f5 100644 --- a/ndb/test/ndbapi/test_event.cpp +++ b/ndb/test/ndbapi/test_event.cpp @@ -32,6 +32,46 @@ int runCreateEvent(NDBT_Context* ctx, NDBT_Step* step) return NDBT_OK; } +int runCreateDropEventOperation(NDBT_Context* ctx, NDBT_Step* step) +{ + int loops = ctx->getNumLoops(); + int records = ctx->getNumRecords(); + HugoTransactions hugoTrans(*ctx->getTab()); + EventOperationStats stats; + + Ndb *pNdb=GETNDB(step); + const NdbDictionary::Table& tab= *ctx->getTab(); + NdbEventOperation *pOp; + char eventName[1024]; + sprintf(eventName,"%s_EVENT",tab.getName()); + int noEventColumnName = tab.getNoOfColumns(); + + for (int i= 0; i < loops; i++) + { +#if 1 + if 
(hugoTrans.eventOperation(GETNDB(step), (void*)&stats, 0) != 0){ + return NDBT_FAILED; + } +#else + g_info << "create EventOperation\n"; + pOp = pNdb->createEventOperation(eventName, 100); + if ( pOp == NULL ) { + g_err << "Event operation creation failed\n"; + return NDBT_FAILED; + } + + g_info << "dropping event operation" << endl; + int res = pNdb->dropEventOperation(pOp); + if (res != 0) { + g_err << "operation execution failed\n"; + return NDBT_FAILED; + } +#endif + } + + return NDBT_OK; +} + int theThreadIdCounter = 0; int runEventOperation(NDBT_Context* ctx, NDBT_Step* step) @@ -122,6 +162,13 @@ TESTCASE("BasicEventOperation", STEP(runEventLoad); FINALIZER(runDropEvent); } +TESTCASE("CreateDropEventOperation", + "Verify that we can Create and Drop many times" + "NOTE! No errors are allowed!" ){ + INITIALIZER(runCreateEvent); + STEP(runCreateDropEventOperation); + FINALIZER(runDropEvent); +} NDBT_TESTSUITE_END(test_event); #if 0 diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index f20f1e3f4cc..559ea037c1f 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -456,7 +456,7 @@ max-time: 150000 cmd: testOperations args: -max-time: 1500 +max-time: 15000 cmd: testTransactions args: diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp index d9207386bf0..b76ac088793 100644 --- a/ndb/test/src/HugoTransactions.cpp +++ b/ndb/test/src/HugoTransactions.cpp @@ -826,6 +826,7 @@ struct receivedEvent { }; int XXXXX = 0; + int HugoTransactions::eventOperation(Ndb* pNdb, void* pstats, int records) { @@ -896,7 +897,9 @@ HugoTransactions::eventOperation(Ndb* pNdb, void* pstats, // set up the callbacks g_info << function << "execute\n"; if (pOp->execute()) { // This starts changes to "start flowing" - g_err << function << "operation execution failed\n"; + g_err << function << "operation execution failed: \n"; + g_err << pOp->getNdbError().code << " " + << 
pOp->getNdbError().message << endl; return NDBT_FAILED; } diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp index d86cfbf3c70..45ee172811a 100644 --- a/ndb/test/src/NDBT_Test.cpp +++ b/ndb/test/src/NDBT_Test.cpp @@ -15,6 +15,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include +#include #include "NDBT.hpp" #include "NDBT_Test.hpp" @@ -476,7 +477,9 @@ extern "C" void * runStep_C(void * s) { + my_thread_init(); runStep(s); + my_thread_end(); NdbThread_Exit(0); return NULL; } diff --git a/ndb/tools/delete_all.cpp b/ndb/tools/delete_all.cpp index 250fe85d546..38a9cdd9354 100644 --- a/ndb/tools/delete_all.cpp +++ b/ndb/tools/delete_all.cpp @@ -80,19 +80,21 @@ int main(int argc, char** argv){ Ndb_cluster_connection con(opt_connect_str); if(con.connect(12, 5, 1) != 0) { + ndbout << "Unable to connect to management server." << endl; + return NDBT_ProgramExit(NDBT_FAILED); + } + if (con.wait_until_ready(30,0) < 0) + { + ndbout << "Cluster nodes not ready in 30 seconds." << endl; return NDBT_ProgramExit(NDBT_FAILED); } - Ndb MyNdb(&con, _dbname ); + Ndb MyNdb(&con, _dbname ); if(MyNdb.init() != 0){ ERR(MyNdb.getNdbError()); return NDBT_ProgramExit(NDBT_FAILED); } - // Connect to Ndb and wait for it to become ready - while(MyNdb.waitUntilReady() != 0) - ndbout << "Waiting for ndb to become ready..." 
<< endl; - // Check if table exists in db int res = NDBT_OK; for(int i = 0; ireadTuplesExclusive(par) ) { + if( pOp->readTuples(NdbOperation::LM_Exclusive,par) ) { goto failed; } - if(pTrans->execute(NoCommit) != 0){ + if(pTrans->execute(NdbTransaction::NoCommit) != 0){ err = pTrans->getNdbError(); if(err.status == NdbError::TemporaryError){ ERR(err); @@ -172,7 +174,7 @@ int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab, int parallelism) } while((check = pOp->nextResult(false)) == 0); if(check != -1){ - check = pTrans->execute(Commit); + check = pTrans->execute(NdbTransaction::Commit); pTrans->restart(); } diff --git a/ndb/tools/desc.cpp b/ndb/tools/desc.cpp index 8c84802ef51..b18f97a05b1 100644 --- a/ndb/tools/desc.cpp +++ b/ndb/tools/desc.cpp @@ -80,19 +80,22 @@ int main(int argc, char** argv){ Ndb_cluster_connection con(opt_connect_str); if(con.connect(12, 5, 1) != 0) { + ndbout << "Unable to connect to management server." << endl; + return NDBT_ProgramExit(NDBT_FAILED); + } + if (con.wait_until_ready(30,0) < 0) + { + ndbout << "Cluster nodes not ready in 30 seconds." 
<< endl; return NDBT_ProgramExit(NDBT_FAILED); } - Ndb* pMyNdb = new Ndb(&con, _dbname); - pMyNdb->init(); - - ndbout << "Waiting..."; - while (pMyNdb->waitUntilReady() != 0) { - ndbout << "..."; + Ndb MyNdb(&con, _dbname); + if(MyNdb.init() != 0){ + ERR(MyNdb.getNdbError()); + return NDBT_ProgramExit(NDBT_FAILED); } - ndbout << endl; - - NdbDictionary::Dictionary * dict = pMyNdb->getDictionary(); + + const NdbDictionary::Dictionary * dict= MyNdb.getDictionary(); for (int i = 0; i < argc; i++) { NDBT_Table* pTab = (NDBT_Table*)dict->getTable(argv[i]); if (pTab != 0){ @@ -132,6 +135,5 @@ int main(int argc, char** argv){ ndbout << argv[i] << ": " << dict->getNdbError() << endl; } - delete pMyNdb; return NDBT_ProgramExit(NDBT_OK); } diff --git a/ndb/tools/drop_index.cpp b/ndb/tools/drop_index.cpp index cc0dd9a8be6..47baee0b66f 100644 --- a/ndb/tools/drop_index.cpp +++ b/ndb/tools/drop_index.cpp @@ -83,16 +83,18 @@ int main(int argc, char** argv){ { return NDBT_ProgramExit(NDBT_FAILED); } - Ndb MyNdb(&con, _dbname ); + if (con.wait_until_ready(30,0) < 0) + { + ndbout << "Cluster nodes not ready in 30 seconds." << endl; + return NDBT_ProgramExit(NDBT_FAILED); + } + Ndb MyNdb(&con, _dbname ); if(MyNdb.init() != 0){ ERR(MyNdb.getNdbError()); return NDBT_ProgramExit(NDBT_FAILED); } - while(MyNdb.waitUntilReady() != 0) - ndbout << "Waiting for ndb to become ready..." 
<< endl; - int res = 0; for(int i = 0; iconnect(12,5,1)) - fatal("unable to connect"); + fatal("Unable to connect to management server."); + if (ndb_cluster_connection->wait_until_ready(30,0) < 0) + fatal("Cluster nodes not ready in 30 seconds."); + ndb = new Ndb(ndb_cluster_connection, _dbname); if (ndb->init() != 0) fatal("init"); - if (ndb->waitUntilReady(30) < 0) - fatal("waitUntilReady"); dic = ndb->getDictionary(); for (int i = 0; _loops == 0 || i < _loops; i++) { list(_tabname, (NdbDictionary::Object::Type)_type); } + delete ndb; + delete ndb_cluster_connection; return NDBT_ProgramExit(NDBT_OK); } diff --git a/ndb/tools/restore/consumer_restore.cpp b/ndb/tools/restore/consumer_restore.cpp index 2a5cda29027..d72b82569e2 100644 --- a/ndb/tools/restore/consumer_restore.cpp +++ b/ndb/tools/restore/consumer_restore.cpp @@ -375,7 +375,8 @@ void BackupRestore::tuple_a(restore_callback_t *cb) } // Prepare transaction (the transaction is NOT yet sent to NDB) - cb->connection->executeAsynchPrepare(Commit, &callback, cb); + cb->connection->executeAsynchPrepare(NdbTransaction::Commit, + &callback, cb); m_transactions++; return; } @@ -543,7 +544,7 @@ BackupRestore::logEntry(const LogEntry & tup) op->setValue(attr->Desc->attrId, dataPtr, length); } - const int ret = trans->execute(Commit); + const int ret = trans->execute(NdbTransaction::Commit); if (ret != 0) { // Both insert update and delete can fail during log running @@ -654,7 +655,7 @@ BackupRestore::tuple(const TupleS & tup) else op->setValue(i, dataPtr, length); } - int ret = trans->execute(Commit); + int ret = trans->execute(NdbTransaction::Commit); if (ret != 0) { ndbout << "execute failed: "; diff --git a/ndb/tools/select_all.cpp b/ndb/tools/select_all.cpp index 94007d422e1..ecb7db91060 100644 --- a/ndb/tools/select_all.cpp +++ b/ndb/tools/select_all.cpp @@ -24,7 +24,6 @@ #include #include #include -#include int scanReadRecords(Ndb*, const NdbDictionary::Table*, @@ -127,19 +126,21 @@ int main(int argc, char** 
argv){ Ndb_cluster_connection con(opt_connect_str); if(con.connect(12, 5, 1) != 0) { + ndbout << "Unable to connect to management server." << endl; + return NDBT_ProgramExit(NDBT_FAILED); + } + if (con.wait_until_ready(30,0) < 0) + { + ndbout << "Cluster nodes not ready in 30 seconds." << endl; return NDBT_ProgramExit(NDBT_FAILED); } - Ndb MyNdb(&con, _dbname ); + Ndb MyNdb(&con, _dbname ); if(MyNdb.init() != 0){ ERR(MyNdb.getNdbError()); return NDBT_ProgramExit(NDBT_FAILED); } - // Connect to Ndb and wait for it to become ready - while(MyNdb.waitUntilReady() != 0) - ndbout << "Waiting for ndb to become ready..." << endl; - // Check if table exists in db const NdbDictionary::Table* pTab = NDBT_Table::discoverTableFromDb(&MyNdb, _tabname); const NdbDictionary::Index * pIdx = 0; @@ -320,7 +321,7 @@ int scanReadRecords(Ndb* pNdb, } } - check = pTrans->execute(NoCommit); + check = pTrans->execute(NdbTransaction::NoCommit); if( check == -1 ) { const NdbError err = pTrans->getNdbError(); diff --git a/ndb/tools/select_count.cpp b/ndb/tools/select_count.cpp index abba39c4d2e..f2b78de4b37 100644 --- a/ndb/tools/select_count.cpp +++ b/ndb/tools/select_count.cpp @@ -100,19 +100,21 @@ int main(int argc, char** argv){ Ndb_cluster_connection con(opt_connect_str); if(con.connect(12, 5, 1) != 0) { + ndbout << "Unable to connect to management server." << endl; + return NDBT_ProgramExit(NDBT_FAILED); + } + if (con.wait_until_ready(30,0) < 0) + { + ndbout << "Cluster nodes not ready in 30 seconds." << endl; return NDBT_ProgramExit(NDBT_FAILED); } - Ndb MyNdb(&con, _dbname ); + Ndb MyNdb(&con, _dbname ); if(MyNdb.init() != 0){ ERR(MyNdb.getNdbError()); return NDBT_ProgramExit(NDBT_FAILED); } - // Connect to Ndb and wait for it to become ready - while(MyNdb.waitUntilReady() != 0) - ndbout << "Waiting for ndb to become ready..." 
<< endl; - for(int i = 0; igetValue(NdbDictionary::Column::ROW_COUNT, (char*)&tmp); pOp->getValue(NdbDictionary::Column::ROW_SIZE, (char*)&row_size); - check = pTrans->execute(NoCommit); + check = pTrans->execute(NdbTransaction::NoCommit); if( check == -1 ) { ERR(pTrans->getNdbError()); pNdb->closeTransaction(pTrans); diff --git a/scripts/mysql_config.sh b/scripts/mysql_config.sh index 86cbe944416..90418de3d1d 100644 --- a/scripts/mysql_config.sh +++ b/scripts/mysql_config.sh @@ -88,7 +88,7 @@ client_libs='@CLIENT_LIBS@' libs="$ldflags -L$pkglibdir -lmysqlclient $client_libs" libs=`echo "$libs" | sed -e 's; \+; ;g' | sed -e 's;^ *;;' | sed -e 's; *\$;;'` -libs_r="$ldflags -L$pkglibdir -lmysqlclient_r @LIBS@ @openssl_libs@" +libs_r="$ldflags -L$pkglibdir -lmysqlclient_r @LIBS@ @ZLIB_LIBS@ @openssl_libs@" libs_r=`echo "$libs_r" | sed -e 's; \+; ;g' | sed -e 's;^ *;;' | sed -e 's; *\$;;'` cflags="-I$pkgincludedir @CFLAGS@ " #note: end space! include="-I$pkgincludedir" @@ -100,7 +100,9 @@ for remove in DDBUG_OFF DSAFEMALLOC USAFEMALLOC DSAFE_MUTEX \ DPEDANTIC_SAFEMALLOC DUNIV_MUST_NOT_INLINE DFORCE_INIT_OF_VARS \ DEXTRA_DEBUG DHAVE_purify 'O[0-9]' 'W[-A-Za-z]*' do - cflags=`echo "$cflags"|sed -e "s/-$remove *//g"` + # The first option we might strip will always have a space before it because + # we set -I$pkgincludedir as the first option + cflags=`echo "$cflags"|sed -e "s/ -$remove */ /g"` done cflags=`echo "$cflags"|sed -e 's/ *\$//'` diff --git a/scripts/mysqld_multi.sh b/scripts/mysqld_multi.sh index ba46fd6fa29..ee873a86c8d 100644 --- a/scripts/mysqld_multi.sh +++ b/scripts/mysqld_multi.sh @@ -4,7 +4,7 @@ use Getopt::Long; use POSIX qw(strftime); $|=1; -$VER="2.11"; +$VER="2.12"; $opt_config_file = undef(); $opt_example = 0; @@ -430,6 +430,16 @@ sub find_groups { $data[$i] = $line; } + if (defined($ENV{MYSQL_HOME}) && -f "$ENV{MYSQL_HOME}/my.cnf" && + -r "$ENV{MYSQL_HOME}/my.cnf") + { + open(MY_CNF, "<$ENV{MYSQL_HOME}/my.cnf") && (@tmp=) && + close(MY_CNF); + } + 
for (; ($line = shift @tmp); $i++) + { + $data[$i] = $line; + } if (-f "$homedir/.my.cnf" && -r "$homedir/.my.cnf") { open(MY_CNF, "<$homedir/.my.cnf") && (@tmp=) && close(MY_CNF); diff --git a/scripts/mysqld_safe.sh b/scripts/mysqld_safe.sh index 1f4d17f8885..d6ee11f226d 100644 --- a/scripts/mysqld_safe.sh +++ b/scripts/mysqld_safe.sh @@ -52,11 +52,7 @@ parse_arguments() { # mysqld_safe-specific options - must be set in my.cnf ([mysqld_safe])! --ledir=*) ledir=`echo "$arg" | sed -e "s;--ledir=;;"` ;; - # err-log should be removed in 5.0 - --err-log=*) err_log=`echo "$arg" | sed -e "s;--err-log=;;"` ;; --log-error=*) err_log=`echo "$arg" | sed -e "s;--log-error=;;"` ;; - # QQ The --open-files should be removed in 5.0 - --open-files=*) open_files=`echo "$arg" | sed -e "s;--open-files=;;"` ;; --open-files-limit=*) open_files=`echo "$arg" | sed -e "s;--open-files-limit=;;"` ;; --core-file-size=*) core_file_size=`echo "$arg" | sed -e "s;--core-file-size=;;"` ;; --timezone=*) TZ=`echo "$arg" | sed -e "s;--timezone=;;"` ; export TZ; ;; @@ -86,7 +82,7 @@ parse_arguments() { MY_PWD=`pwd` # Check if we are starting this relative (for the binary release) -if test -d $MY_PWD/data/mysql -a -f ./share/mysql/english/errmsg.sys -a \ +if test -f ./share/mysql/english/errmsg.sys -a \ -x ./bin/mysqld then MY_BASEDIR_VERSION=$MY_PWD # Where bin, share and data are @@ -94,10 +90,10 @@ then DATADIR=$MY_BASEDIR_VERSION/data if test -z "$defaults" then - defaults="--defaults-extra-file=$MY_BASEDIR_VERSION/data/my.cnf" + defaults="--defaults-extra-file=$DATADIR/my.cnf" fi # Check if this is a 'moved install directory' -elif test -f ./var/mysql/db.frm -a -f ./share/mysql/english/errmsg.sys -a \ +elif test -f ./share/mysql/english/errmsg.sys -a \ -x ./libexec/mysqld then MY_BASEDIR_VERSION=$MY_PWD # Where libexec, share and var are @@ -106,8 +102,17 @@ then else MY_BASEDIR_VERSION=@prefix@ DATADIR=@localstatedir@ + if test -z "$MYSQL_HOME" + then + MYSQL_HOME=$DATADIR # Installation in a 
not common path + fi ledir=@libexecdir@ fi +if test -z "$MYSQL_HOME" +then + MYSQL_HOME=$MY_BASEDIR_VERSION +fi +export MYSQL_HOME user=@MYSQLD_USER@ niceness=0 @@ -322,13 +327,13 @@ do # but should work for the rest of the servers. # The only thing is ps x => redhat 5 gives warnings when using ps -x. # kill -9 is used or the process won't react on the kill. - numofproces=`ps xa | grep -v "grep" | grep "$ledir/$MYSQLD\>" | grep -c "pid-file=$pid_file"` + numofproces=`ps xaww | grep -v "grep" | grep "$ledir/$MYSQLD\>" | grep -c "pid-file=$pid_file"` echo -e "\nNumber of processes running now: $numofproces" | tee -a $err_log I=1 while test "$I" -le "$numofproces" do - PROC=`ps xa | grep "$ledir/$MYSQLD\>" | grep -v "grep" | grep "pid-file=$pid_file" | sed -n '$p'` + PROC=`ps xaww | grep "$ledir/$MYSQLD\>" | grep -v "grep" | grep "pid-file=$pid_file" | sed -n '$p'` for T in $PROC do diff --git a/sql-bench/crash-me.sh b/sql-bench/crash-me.sh index 8ec62442b11..a40ef8fbc7d 100644 --- a/sql-bench/crash-me.sh +++ b/sql-bench/crash-me.sh @@ -1039,7 +1039,7 @@ try_and_report("Automatic row id", "automatic_rowid", ["MATCH UNIQUE","match_unique", "1 match unique (select a from crash_me)",1,0], ["MATCH","match","1 match (select a from crash_me)",1,0], - ["MATCHES","matches","b matcjhes 'a*'",1,0], + ["MATCHES","matches","b matches 'a*'",1,0], ["NOT BETWEEN","not_between","7 not between 4 and 6",1,0], ["NOT EXISTS","not_exists", "not exists (select * from crash_me where a = 2)",1,0], diff --git a/sql-common/client.c b/sql-common/client.c index 0ff98102da9..9e08e8c0f59 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -880,7 +880,7 @@ static const char *default_options[]= "connect-timeout", "local-infile", "disable-local-infile", "replication-probe", "enable-reads-from-master", "repl-parse-query", "ssl-cipher", "max-allowed-packet", "protocol", "shared-memory-base-name", - "multi-results", "multi-queries", "secure-auth", + "multi-results", "multi-statements", 
"multi-queries", "secure-auth", "report-data-truncation", NullS }; @@ -1087,12 +1087,13 @@ void mysql_read_default_options(struct st_mysql_options *options, options->client_flag|= CLIENT_MULTI_RESULTS; break; case 31: + case 32: options->client_flag|= CLIENT_MULTI_STATEMENTS | CLIENT_MULTI_RESULTS; break; - case 32: /* secure-auth */ + case 33: /* secure-auth */ options->secure_auth= TRUE; break; - case 33: /* report-data-truncation */ + case 34: /* report-data-truncation */ options->report_data_truncation= opt_arg ? test(atoi(opt_arg)) : 1; break; default: diff --git a/sql-common/my_time.c b/sql-common/my_time.c index 45adb657f73..1078259f15d 100644 --- a/sql-common/my_time.c +++ b/sql-common/my_time.c @@ -382,8 +382,7 @@ str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time, } l_time->neg= 0; - if (year_length == 2 && i >= format_position[1] && i >=format_position[2] && - (l_time->month || l_time->day)) + if (year_length == 2 && not_zero_date) l_time->year+= (l_time->year < YY_PART_YEAR ? 
2000 : 1900); if (!not_zero_date && (flags & TIME_NO_ZERO_DATE)) @@ -911,14 +910,14 @@ longlong number_to_datetime(longlong nr, MYSQL_TIME *time_res, goto err; if (nr <= (YY_PART_YEAR-1)*10000L+1231L) { - nr= (nr+20000000L)*1000000L; // YYMMDD, year: 2000-2069 + nr= (nr+20000000L)*1000000L; /* YYMMDD, year: 2000-2069 */ goto ok; } if (nr < (YY_PART_YEAR)*10000L+101L) goto err; if (nr <= 991231L) { - nr= (nr+19000000L)*1000000L; // YYMMDD, year: 1970-1999 + nr= (nr+19000000L)*1000000L; /* YYMMDD, year: 1970-1999 */ goto ok; } if (nr < 10000101L) @@ -932,13 +931,13 @@ longlong number_to_datetime(longlong nr, MYSQL_TIME *time_res, goto err; if (nr <= (YY_PART_YEAR-1)*LL(10000000000)+LL(1231235959)) { - nr= nr+LL(20000000000000); // YYMMDDHHMMSS, 2000-2069 + nr= nr+LL(20000000000000); /* YYMMDDHHMMSS, 2000-2069 */ goto ok; } if (nr < YY_PART_YEAR*LL(10000000000)+ LL(101000000)) goto err; if (nr <= LL(991231235959)) - nr= nr+LL(19000000000000); // YYMMDDHHMMSS, 1970-1999 + nr= nr+LL(19000000000000); /* YYMMDDHHMMSS, 1970-1999 */ ok: part1=(long) (nr/LL(1000000)); diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc index f79ca903e7a..f04853fd482 100644 --- a/sql/examples/ha_archive.cc +++ b/sql/examples/ha_archive.cc @@ -520,7 +520,7 @@ error: int ha_archive::write_row(byte * buf) { z_off_t written; - Field_blob **field; + uint *ptr, *end; DBUG_ENTER("ha_archive::write_row"); if (share->crashed) @@ -530,25 +530,27 @@ int ha_archive::write_row(byte * buf) if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); pthread_mutex_lock(&share->mutex); - written= gzwrite(share->archive_write, buf, table->reclength); - DBUG_PRINT("ha_archive::write_row", ("Wrote %d bytes expected %d", written, table->reclength)); + written= gzwrite(share->archive_write, buf, table->s->reclength); + DBUG_PRINT("ha_archive::write_row", ("Wrote %d bytes expected %d", written, table->s->reclength)); if (!delayed_insert || !bulk_insert) 
share->dirty= TRUE; - if (written != table->reclength) + if (written != table->s->reclength) goto error; /* We should probably mark the table as damagaged if the record is written but the blob fails. */ - for (field= table->blob_field ; *field ; field++) + for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ; + ptr != end ; + ptr++) { char *ptr; - uint32 size= (*field)->get_length(); + uint32 size= ((Field_blob*) table->field[*ptr])->get_length(); if (size) { - (*field)->get_ptr(&ptr); + ((Field_blob*) table->field[*ptr])->get_ptr(&ptr); written= gzwrite(share->archive_write, ptr, (unsigned)size); if (written != size) goto error; @@ -614,13 +616,13 @@ int ha_archive::rnd_init(bool scan) int ha_archive::get_row(gzFile file_to_read, byte *buf) { int read; // Bytes read, gzread() returns int + uint *ptr, *end; char *last; size_t total_blob_length= 0; - Field_blob **field; DBUG_ENTER("ha_archive::get_row"); - read= gzread(file_to_read, buf, table->reclength); - DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->reclength)); + read= gzread(file_to_read, buf, table->s->reclength); + DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->s->reclength)); if (read == Z_STREAM_ERROR) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); @@ -633,27 +635,31 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf) If the record is the wrong size, the file is probably damaged, unless we are dealing with a delayed insert or a bulk insert. 
*/ - if ((ulong) read != table->reclength) + if ((ulong) read != table->s->reclength) DBUG_RETURN(HA_ERR_END_OF_FILE); /* Calculate blob length, we use this for our buffer */ - for (field=table->blob_field; *field ; field++) - total_blob_length += (*field)->get_length(); + for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ; + ptr != end ; + ptr++) + total_blob_length += ((Field_blob*) table->field[*ptr])->get_length(); /* Adjust our row buffer if we need be */ buffer.alloc(total_blob_length); last= (char *)buffer.ptr(); /* Loop through our blobs and read them */ - for (field=table->blob_field; *field ; field++) + for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ; + ptr != end ; + ptr++) { - size_t size= (*field)->get_length(); + size_t size= ((Field_blob*) table->field[*ptr])->get_length(); if (size) { read= gzread(file_to_read, last, size); if ((size_t) read != size) DBUG_RETURN(HA_ERR_END_OF_FILE); - (*field)->set_ptr(size, last); + ((Field_blob*) table->field[*ptr])->set_ptr(size, last); last += size; } } @@ -753,8 +759,8 @@ int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt) I know, this malloc'ing memory but this should be a very rare event. */ - if (!(buf= (byte*) my_malloc(table->rec_buff_length > sizeof(ulonglong) +1 ? - table->rec_buff_length : sizeof(ulonglong) +1 , + if (!(buf= (byte*) my_malloc(table->s->rec_buff_length > sizeof(ulonglong) +1 ? 
+ table->s->rec_buff_length : sizeof(ulonglong) +1 , MYF(MY_WME)))) { rc= HA_ERR_CRASHED_ON_USAGE; @@ -894,7 +900,7 @@ void ha_archive::info(uint flag) VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME))); - mean_rec_length= table->reclength + buffer.alloced_length(); + mean_rec_length= table->s->reclength + buffer.alloced_length(); data_file_length= file_stat.st_size; create_time= file_stat.st_ctime; update_time= file_stat.st_mtime; diff --git a/sql/examples/ha_tina.cc b/sql/examples/ha_tina.cc index 39573a8f54c..46a22614566 100644 --- a/sql/examples/ha_tina.cc +++ b/sql/examples/ha_tina.cc @@ -375,7 +375,7 @@ int ha_tina::find_current_row(byte *buf) } next_position= (end_ptr - share->mapped_file)+1; /* Maybe use \N for null? */ - memset(buf, 0, table->null_bytes); /* We do not implement nulls! */ + memset(buf, 0, table->s->null_bytes); /* We do not implement nulls! */ DBUG_RETURN(0); } diff --git a/sql/field.cc b/sql/field.cc index a270f102cd5..d15db92e51f 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -351,10 +351,8 @@ String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_flag) } -/**************************************************************************** -** Functions for the base classes -** This is an unpacked number. -****************************************************************************/ +/* This is used as a table name when the table structure is not set up */ +const char *unknown_table_name= 0; Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg, uchar null_bit_arg, @@ -362,7 +360,7 @@ Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg, struct st_table *table_arg) :ptr(ptr_arg),null_ptr(null_ptr_arg), table(table_arg),orig_table(table_arg), - table_name(table_arg ? table_arg->table_name : 0), + table_name(table_arg ? 
&table_arg->alias : &unknown_table_name), field_name(field_name_arg), query_id(0), key_start(0), part_of_key(0), part_of_sortkey(0), unireg_check(unireg_check_arg), @@ -407,35 +405,24 @@ void Field_num::add_zerofill_and_unsigned(String &res) const res.append(" zerofill"); } -void Field_num::make_field(Send_field *field) +void Field::make_field(Send_field *field) { - /* table_cache_key is not set for temp tables */ - field->db_name= (orig_table->table_cache_key ? orig_table->table_cache_key : - ""); - field->org_table_name= orig_table->real_name; - field->table_name= orig_table->table_name; - field->col_name=field->org_col_name=field_name; + field->db_name= orig_table->s->table_cache_key; + field->org_table_name= orig_table->s->table_name; + field->table_name= orig_table->alias; + field->col_name= field->org_col_name= field_name; field->charsetnr= charset()->number; field->length=field_length; field->type=type(); field->flags=table->maybe_null ? (flags & ~NOT_NULL_FLAG) : flags; - field->decimals=dec; + field->decimals= 0; } -void Field_str::make_field(Send_field *field) +void Field_num::make_field(Send_field *field) { - /* table_cache_key is not set for temp tables */ - field->db_name= (orig_table->table_cache_key ? orig_table->table_cache_key : - ""); - field->org_table_name= orig_table->real_name; - field->table_name= orig_table->table_name; - field->col_name=field->org_col_name=field_name; - field->charsetnr= charset()->number; - field->length=field_length; - field->type=type(); - field->flags=table->maybe_null ? 
(flags & ~NOT_NULL_FLAG) : flags; - field->decimals=0; + Field::make_field(field); + field->decimals= dec; } @@ -448,7 +435,7 @@ uint Field::fill_cache_field(CACHE_FIELD *copy) { copy->blob_field=(Field_blob*) this; copy->strip=0; - copy->length-=table->blob_ptr_size; + copy->length-= table->s->blob_ptr_size; return copy->length; } else if (!zero_pack() && @@ -5189,7 +5176,7 @@ Field_blob::Field_blob(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, { flags|= BLOB_FLAG; if (table) - table->blob_fields++; + table->s->blob_fields++; } @@ -6295,22 +6282,6 @@ Field *Field_bit::new_key_field(MEM_ROOT *root, } -void Field_bit::make_field(Send_field *field) -{ - /* table_cache_key is not set for temp tables */ - field->db_name= (orig_table->table_cache_key ? orig_table->table_cache_key : - ""); - field->org_table_name= orig_table->real_name; - field->table_name= orig_table->table_name; - field->col_name= field->org_col_name= field_name; - field->charsetnr= charset()->number; - field->length= field_length; - field->type= type(); - field->flags= table->maybe_null ? 
(flags & ~NOT_NULL_FLAG) : flags; - field->decimals= 0; -} - - int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs) { int delta; @@ -6776,7 +6747,7 @@ create_field::create_field(Field *old_field,Field *orig_field) /* Fix if the original table had 4 byte pointer blobs */ if (flags & BLOB_FLAG) - pack_length= (pack_length- old_field->table->blob_ptr_size + + pack_length= (pack_length- old_field->table->s->blob_ptr_size + portable_sizeof_char_ptr); switch (sql_type) { @@ -6824,19 +6795,20 @@ create_field::create_field(Field *old_field,Field *orig_field) old_field->ptr && orig_field) { char buff[MAX_FIELD_WIDTH],*pos; - String tmp(buff,sizeof(buff), charset); + String tmp(buff,sizeof(buff), charset), *res; + my_ptrdiff_t diff; /* Get the value from default_values */ - my_ptrdiff_t diff= (my_ptrdiff_t) (orig_field->table->rec_buff_length*2); + diff= (my_ptrdiff_t) (orig_field->table->s->default_values- + orig_field->table->record[0]); orig_field->move_field(diff); // Points now at default_values bool is_null=orig_field->is_real_null(); - orig_field->val_str(&tmp); + res= orig_field->val_str(&tmp); orig_field->move_field(-diff); // Back to record[0] if (!is_null) { - pos= (char*) sql_memdup(tmp.ptr(),tmp.length()+1); - pos[tmp.length()]=0; - def= new Item_string(pos, tmp.length(), charset); + pos= (char*) sql_strmake(res->ptr(), res->length()); + def= new Item_string(pos, res->length(), charset); } } } diff --git a/sql/field.h b/sql/field.h index 9375fbc8d5a..9e690705801 100644 --- a/sql/field.h +++ b/sql/field.h @@ -47,7 +47,7 @@ public: */ struct st_table *table; // Pointer for table struct st_table *orig_table; // Pointer to original table - const char *table_name,*field_name; + const char **table_name, *field_name; LEX_STRING comment; ulong query_id; // For quick test of used fields /* Field is part of the following keys */ @@ -127,7 +127,7 @@ public: virtual void reset_fields() {} virtual void set_default() { - my_ptrdiff_t offset = (my_ptrdiff_t) 
(table->default_values - + my_ptrdiff_t offset = (my_ptrdiff_t) (table->s->default_values - table->record[0]); memcpy(ptr, ptr + offset, pack_length()); if (null_ptr) @@ -176,7 +176,7 @@ public: { if (null_ptr) null_ptr[row_offset]&= (uchar) ~null_bit; } inline bool maybe_null(void) { return null_ptr != 0 || table->maybe_null; } inline bool real_maybe_null(void) { return null_ptr != 0; } - virtual void make_field(Send_field *)=0; + virtual void make_field(Send_field *); virtual void sort_string(char *buff,uint length)=0; virtual bool optimize_range(uint idx, uint part); /* @@ -355,7 +355,6 @@ public: int store(double nr); int store(longlong nr)=0; int store(const char *to,uint length,CHARSET_INFO *cs)=0; - void make_field(Send_field *); uint size_of() const { return sizeof(*this); } CHARSET_INFO *charset(void) const { return field_charset; } void set_charset(CHARSET_INFO *charset) { field_charset=charset; } @@ -906,9 +905,9 @@ public: enum_field_types type() const { return ((orig_table && - orig_table->db_create_options & HA_OPTION_PACK_RECORD && + orig_table->s->db_create_options & HA_OPTION_PACK_RECORD && field_length >= 4) && - orig_table->frm_version < FRM_VER_TRUE_VARCHAR ? + orig_table->s->frm_version < FRM_VER_TRUE_VARCHAR ? 
MYSQL_TYPE_VAR_STRING : MYSQL_TYPE_STRING); } enum ha_base_keytype key_type() const @@ -1038,7 +1037,7 @@ public: uint32 key_length() const { return 0; } void sort_string(char *buff,uint length); uint32 pack_length() const - { return (uint32) (packlength+table->blob_ptr_size); } + { return (uint32) (packlength+table->s->blob_ptr_size); } inline uint32 max_data_length() const { return (uint32) (((ulonglong) 1 << (packlength*8)) -1); @@ -1212,7 +1211,6 @@ public: uint32 max_length() { return (uint32) field_length + (bit_len > 0); } uint size_of() const { return sizeof(*this); } Item_result result_type () const { return INT_RESULT; } - void make_field(Send_field *); void reset(void) { bzero(ptr, field_length); } int store(const char *to, uint length, CHARSET_INFO *charset); int store(double nr); diff --git a/sql/field_conv.cc b/sql/field_conv.cc index 9fd4f0228b3..b337ccd6306 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -487,16 +487,17 @@ void Copy_field::set(Field *to,Field *from,bool save) void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) { + bool compatible_db_low_byte_first= (to->table->s->db_low_byte_first == + from->table->s->db_low_byte_first); if (to->flags & BLOB_FLAG) { if (!(from->flags & BLOB_FLAG) || from->charset() != to->charset()) return do_conv_blob; - if (from_length != to_length || - to->table->db_low_byte_first != from->table->db_low_byte_first) + if (from_length != to_length || !compatible_db_low_byte_first) { // Correct pointer to point at char pointer - to_ptr+=to_length - to->table->blob_ptr_size; - from_ptr+=from_length- from->table->blob_ptr_size; + to_ptr+= to_length - to->table->s->blob_ptr_size; + from_ptr+= from_length- from->table->s->blob_ptr_size; return do_copy_blob; } } @@ -509,7 +510,7 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) if (from->result_type() == STRING_RESULT) { if (to->real_type() != from->real_type() || - to->table->db_low_byte_first != 
from->table->db_low_byte_first) + !compatible_db_low_byte_first) { if (from->real_type() == FIELD_TYPE_ENUM || from->real_type() == FIELD_TYPE_SET) @@ -541,7 +542,7 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) } else if (to->real_type() != from->real_type() || to_length != from_length || - to->table->db_low_byte_first != from->table->db_low_byte_first) + !compatible_db_low_byte_first) { if (to->real_type() == FIELD_TYPE_DECIMAL || to->result_type() == STRING_RESULT) @@ -552,8 +553,7 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) } else { - if (!to->eq_def(from) || - to->table->db_low_byte_first != from->table->db_low_byte_first) + if (!to->eq_def(from) || !compatible_db_low_byte_first) { if (to->real_type() == FIELD_TYPE_DECIMAL) return do_field_string; @@ -587,7 +587,7 @@ void field_conv(Field *to,Field *from) to->real_type() != FIELD_TYPE_ENUM && to->real_type() != FIELD_TYPE_SET && from->charset() == to->charset() && - to->table->db_low_byte_first == from->table->db_low_byte_first) + to->table->s->db_low_byte_first == from->table->s->db_low_byte_first) { // Identical fields memcpy(to->ptr,from->ptr,to->pack_length()); return; diff --git a/sql/filesort.cc b/sql/filesort.cc index 59e2491a7a2..6bad5202ec6 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -127,7 +127,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, param.ref_length= table->file->ref_length; param.addon_field= 0; param.addon_length= 0; - if (!(table->tmp_table || table->fulltext_searched)) + if (!(table->s->tmp_table || table->fulltext_searched)) { /* Get the descriptors of all fields whose values are appended @@ -463,7 +463,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, if (!flag) { my_store_ptr(ref_pos,ref_length,record); // Position to row - record+=sort_form->db_record_offset; + record+= sort_form->s->db_record_offset; } else file->position(sort_form->record[0]); diff --git 
a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index 322126ff47b..fe266fdbf14 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -490,14 +490,16 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) { char name_buff[FN_REFLEN]; uint open_mode=(mode == O_RDONLY ? DB_RDONLY : 0) | DB_THREAD; + uint max_key_length; int error; + TABLE_SHARE *table_share= table->s; DBUG_ENTER("ha_berkeley::open"); /* Open primary key */ hidden_primary_key=0; - if ((primary_key=table->primary_key) >= MAX_KEY) + if ((primary_key= table_share->primary_key) >= MAX_KEY) { // No primary key - primary_key=table->keys; + primary_key= table_share->keys; key_used_on_scan=MAX_KEY; ref_length=hidden_primary_key=BDB_HIDDEN_PRIMARY_KEY_LENGTH; } @@ -505,18 +507,18 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) key_used_on_scan=primary_key; /* Need some extra memory in case of packed keys */ - uint max_key_length= table->max_key_length + MAX_REF_PARTS*3; + max_key_length= table_share->max_key_length + MAX_REF_PARTS*3; if (!(alloc_ptr= my_multi_malloc(MYF(MY_WME), &key_buff, max_key_length, &key_buff2, max_key_length, &primary_key_buff, (hidden_primary_key ? 
0 : - table->key_info[table->primary_key].key_length), + table->key_info[table_share->primary_key].key_length), NullS))) DBUG_RETURN(1); /* purecov: inspected */ if (!(rec_buff= (byte*) my_malloc((alloced_rec_buff_length= - table->rec_buff_length), + table_share->rec_buff_length), MYF(MY_WME)))) { my_free(alloc_ptr,MYF(0)); /* purecov: inspected */ @@ -524,7 +526,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) } /* Init shared structure */ - if (!(share=get_share(name,table))) + if (!(share= get_share(name,table))) { my_free((char*) rec_buff,MYF(0)); /* purecov: inspected */ my_free(alloc_ptr,MYF(0)); /* purecov: inspected */ @@ -537,7 +539,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) /* Fill in shared structure, if needed */ pthread_mutex_lock(&share->mutex); - file = share->file; + file= share->file; if (!share->use_count++) { if ((error=db_create(&file, db_env, 0))) @@ -548,13 +550,13 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) my_errno=error; /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } - share->file = file; + share->file= file; file->set_bt_compare(file, (hidden_primary_key ? 
berkeley_cmp_hidden_key : berkeley_cmp_packed_key)); if (!hidden_primary_key) - file->app_private= (void*) (table->key_info+table->primary_key); + file->app_private= (void*) (table->key_info + table_share->primary_key); if ((error= txn_begin(db_env, 0, (DB_TXN**) &transaction, 0)) || (error= (file->open(file, transaction, fn_format(name_buff, name, "", ha_berkeley_ext, @@ -562,7 +564,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) "main", DB_BTREE, open_mode, 0))) || (error= transaction->commit(transaction, 0))) { - free_share(share,table, hidden_primary_key,1); /* purecov: inspected */ + free_share(share, table, hidden_primary_key,1); /* purecov: inspected */ my_free((char*) rec_buff,MYF(0)); /* purecov: inspected */ my_free(alloc_ptr,MYF(0)); /* purecov: inspected */ my_errno=error; /* purecov: inspected */ @@ -574,7 +576,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) key_type[primary_key]=DB_NOOVERWRITE; DB **ptr=key_file; - for (uint i=0, used_keys=0; i < table->keys ; i++, ptr++) + for (uint i=0, used_keys=0; i < table_share->keys ; i++, ptr++) { char part[7]; if (i != primary_key) @@ -606,7 +608,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) } } /* Calculate pack_length of primary key */ - share->fixed_length_primary_key=1; + share->fixed_length_primary_key= 1; if (!hidden_primary_key) { ref_length=0; @@ -616,18 +618,19 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) ref_length+= key_part->field->max_packed_col_length(key_part->length); share->fixed_length_primary_key= (ref_length == table->key_info[primary_key].key_length); - share->status|=STATUS_PRIMARY_KEY_INIT; + share->status|= STATUS_PRIMARY_KEY_INIT; } - share->ref_length=ref_length; + share->ref_length= ref_length; } - ref_length=share->ref_length; // If second open + ref_length= share->ref_length; // If second open pthread_mutex_unlock(&share->mutex); transaction=0; cursor=0; key_read=0; 
block_size=8192; // Berkeley DB block size - share->fixed_length_row=!(table->db_create_options & HA_OPTION_PACK_RECORD); + share->fixed_length_row= !(table_share->db_create_options & + HA_OPTION_PACK_RECORD); get_status(); info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); @@ -667,9 +670,15 @@ bool ha_berkeley::fix_rec_buff_for_blob(ulong length) ulong ha_berkeley::max_row_length(const byte *buf) { - ulong length=table->reclength + table->fields*2; - for (Field_blob **ptr=table->blob_field ; *ptr ; ptr++) - length+= (*ptr)->get_length((char*) buf+(*ptr)->offset())+2; + ulong length= table->s->reclength + table->s->fields*2; + uint *ptr, *end; + for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ; + ptr != end ; + ptr++) + { + Field_blob *blob= ((Field_blob*) table->field[*ptr]); + length+= blob->get_length((char*) buf + blob->offset())+2; + } return length; } @@ -685,29 +694,30 @@ ulong ha_berkeley::max_row_length(const byte *buf) int ha_berkeley::pack_row(DBT *row, const byte *record, bool new_row) { + byte *ptr; bzero((char*) row,sizeof(*row)); if (share->fixed_length_row) { row->data=(void*) record; - row->size=table->reclength+hidden_primary_key; + row->size= table->s->reclength+hidden_primary_key; if (hidden_primary_key) { if (new_row) get_auto_primary_key(current_ident); - memcpy_fixed((char*) record+table->reclength, (char*) current_ident, + memcpy_fixed((char*) record+table->s->reclength, (char*) current_ident, BDB_HIDDEN_PRIMARY_KEY_LENGTH); } return 0; } - if (table->blob_fields) + if (table->s->blob_fields) { if (fix_rec_buff_for_blob(max_row_length(record))) return HA_ERR_OUT_OF_MEM; /* purecov: inspected */ } /* Copy null bits */ - memcpy(rec_buff, record, table->null_bytes); - byte *ptr=rec_buff + table->null_bytes; + memcpy(rec_buff, record, table->s->null_bytes); + ptr= rec_buff + table->s->null_bytes; for (Field **field=table->field ; *field ; field++) ptr=(byte*) (*field)->pack((char*) ptr, @@ -730,13 +740,13 @@ int 
ha_berkeley::pack_row(DBT *row, const byte *record, bool new_row) void ha_berkeley::unpack_row(char *record, DBT *row) { if (share->fixed_length_row) - memcpy(record,(char*) row->data,table->reclength+hidden_primary_key); + memcpy(record,(char*) row->data,table->s->reclength+hidden_primary_key); else { /* Copy null bits */ const char *ptr= (const char*) row->data; - memcpy(record, ptr, table->null_bytes); - ptr+=table->null_bytes; + memcpy(record, ptr, table->s->null_bytes); + ptr+= table->s->null_bytes; for (Field **field=table->field ; *field ; field++) ptr= (*field)->unpack(record + (*field)->offset(), ptr); } @@ -882,7 +892,7 @@ int ha_berkeley::write_row(byte * record) DBUG_RETURN(error); /* purecov: inspected */ table->insert_or_update= 1; // For handling of VARCHAR - if (table->keys + test(hidden_primary_key) == 1) + if (table->s->keys + test(hidden_primary_key) == 1) { error=file->put(file, transaction, create_key(&prim_key, primary_key, key_buff, record), @@ -893,7 +903,8 @@ int ha_berkeley::write_row(byte * record) { DB_TXN *sub_trans = transaction; /* Don't use sub transactions in temporary tables */ - ulong thd_options = table->tmp_table == NO_TMP_TABLE ? table->in_use->options : 0; + ulong thd_options= (table->s->tmp_table == NO_TMP_TABLE ? 
+ table->in_use->options : 0); for (uint retry=0 ; retry < berkeley_trans_retry ; retry++) { key_map changed_keys(0); @@ -908,7 +919,7 @@ int ha_berkeley::write_row(byte * record) &row, key_type[primary_key]))) { changed_keys.set_bit(primary_key); - for (uint keynr=0 ; keynr < table->keys ; keynr++) + for (uint keynr=0 ; keynr < table->s->keys ; keynr++) { if (keynr == primary_key) continue; @@ -940,7 +951,8 @@ int ha_berkeley::write_row(byte * record) else if (!changed_keys.is_clear_all()) { new_error = 0; - for (uint keynr=0 ; keynr < table->keys+test(hidden_primary_key) ; + for (uint keynr=0; + keynr < table->s->keys+test(hidden_primary_key); keynr++) { if (changed_keys.is_set(keynr)) @@ -1090,7 +1102,7 @@ int ha_berkeley::restore_keys(DB_TXN *trans, key_map *changed_keys, that one just put back the old value. */ if (!changed_keys->is_clear_all()) { - for (keynr=0 ; keynr < table->keys+test(hidden_primary_key) ; keynr++) + for (keynr=0 ; keynr < table->s->keys+test(hidden_primary_key) ; keynr++) { if (changed_keys->is_set(keynr)) { @@ -1117,9 +1129,9 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) DBT prim_key, key, old_prim_key; int error; DB_TXN *sub_trans; - ulong thd_options = table->tmp_table == NO_TMP_TABLE ? table->in_use->options : 0; + ulong thd_options= (table->s->tmp_table == NO_TMP_TABLE ? 
+ table->in_use->options : 0); bool primary_key_changed; - DBUG_ENTER("update_row"); LINT_INIT(error); @@ -1163,7 +1175,7 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) thd_options, using_ignore))) { // Update all other keys - for (uint keynr=0 ; keynr < table->keys ; keynr++) + for (uint keynr=0 ; keynr < table->s->keys ; keynr++) { if (keynr == primary_key) continue; @@ -1294,7 +1306,9 @@ int ha_berkeley::remove_keys(DB_TXN *trans, const byte *record, DBT *new_record, DBT *prim_key, key_map *keys) { int result = 0; - for (uint keynr=0 ; keynr < table->keys+test(hidden_primary_key) ; keynr++) + for (uint keynr=0; + keynr < table->s->keys+test(hidden_primary_key); + keynr++) { if (keys->is_set(keynr)) { @@ -1314,8 +1328,9 @@ int ha_berkeley::delete_row(const byte * record) { int error; DBT row, prim_key; - key_map keys=table->keys_in_use; - ulong thd_options = table->tmp_table == NO_TMP_TABLE ? table->in_use->options : 0; + key_map keys= table->s->keys_in_use; + ulong thd_options= (table->s->tmp_table == NO_TMP_TABLE ? 
+ table->in_use->options : 0); DBUG_ENTER("delete_row"); statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); @@ -1374,7 +1389,7 @@ int ha_berkeley::index_init(uint keynr) { int error; DBUG_ENTER("ha_berkeley::index_init"); - DBUG_PRINT("enter",("table: '%s' key: %d", table->real_name, keynr)); + DBUG_PRINT("enter",("table: '%s' key: %d", table->s->table_name, keynr)); /* Under some very rare conditions (like full joins) we may already have @@ -1401,7 +1416,7 @@ int ha_berkeley::index_end() DBUG_ENTER("ha_berkely::index_end"); if (cursor) { - DBUG_PRINT("enter",("table: '%s'", table->real_name)); + DBUG_PRINT("enter",("table: '%s'", table->s->table_name)); error=cursor->c_close(cursor); cursor=0; } @@ -1756,14 +1771,14 @@ void ha_berkeley::info(uint flag) if ((flag & HA_STATUS_CONST) || version != share->version) { version=share->version; - for (uint i=0 ; i < table->keys ; i++) + for (uint i=0 ; i < table->s->keys ; i++) { table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= share->rec_per_key[i]; } } /* Don't return key if we got an error for the internal primary key */ - if (flag & HA_STATUS_ERRKEY && last_dup_key < table->keys) + if (flag & HA_STATUS_ERRKEY && last_dup_key < table->s->keys) errkey= last_dup_key; DBUG_VOID_RETURN; } @@ -2005,9 +2020,9 @@ int ha_berkeley::create(const char *name, register TABLE *form, if ((error= create_sub_table(name_buff,"main",DB_BTREE,0))) DBUG_RETURN(error); /* purecov: inspected */ - primary_key=table->primary_key; + primary_key= table->s->primary_key; /* Create the keys */ - for (uint i=0; i < form->keys; i++) + for (uint i=0; i < form->s->keys; i++) { if (i != primary_key) { @@ -2029,7 +2044,7 @@ int ha_berkeley::create(const char *name, register TABLE *form, "status", DB_BTREE, DB_CREATE, 0)))) { char rec_buff[4+MAX_KEY*4]; - uint length= 4+ table->keys*4; + uint length= 4+ table->s->keys*4; bzero(rec_buff, length); error= write_status(status_block, rec_buff, length); 
status_block->close(status_block,0); @@ -2149,9 +2164,9 @@ ulonglong ha_berkeley::get_auto_increment() (void) ha_berkeley::extra(HA_EXTRA_KEYREAD); /* Set 'active_index' */ - ha_berkeley::index_init(table->next_number_index); + ha_berkeley::index_init(table->s->next_number_index); - if (!table->next_number_key_offset) + if (!table->s->next_number_key_offset) { // Autoincrement at key-start error=ha_berkeley::index_last(table->record[1]); } @@ -2164,7 +2179,7 @@ ulonglong ha_berkeley::get_auto_increment() /* Reading next available number for a sub key */ ha_berkeley::create_key(&last_key, active_index, key_buff, table->record[0], - table->next_number_key_offset); + table->s->next_number_key_offset); /* Store for compare */ memcpy(old_key.data=key_buff2, key_buff, (old_key.size=last_key.size)); old_key.app_private=(void*) key_info; @@ -2193,8 +2208,8 @@ ulonglong ha_berkeley::get_auto_increment() } } if (!error) - nr=(ulonglong) - table->next_number_field->val_int_offset(table->rec_buff_length)+1; + nr= (ulonglong) + table->next_number_field->val_int_offset(table->s->rec_buff_length)+1; ha_berkeley::index_end(); (void) ha_berkeley::extra(HA_EXTRA_NO_KEYREAD); return nr; @@ -2279,7 +2294,7 @@ int ha_berkeley::analyze(THD* thd, HA_CHECK_OPT* check_opt) free(txn_stat_ptr); } - for (i=0 ; i < table->keys ; i++) + for (i=0 ; i < table->s->keys ; i++) { if (stat) { @@ -2414,14 +2429,15 @@ static BDB_SHARE *get_share(const char *table_name, TABLE *table) char *tmp_name; DB **key_file; u_int32_t *key_type; + uint keys= table->s->keys; if ((share=(BDB_SHARE *) my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), &share, sizeof(*share), - &rec_per_key, table->keys * sizeof(ha_rows), + &rec_per_key, keys * sizeof(ha_rows), &tmp_name, length+1, - &key_file, (table->keys+1) * sizeof(*key_file), - &key_type, (table->keys+1) * sizeof(u_int32_t), + &key_file, (keys+1) * sizeof(*key_file), + &key_type, (keys+1) * sizeof(u_int32_t), NullS))) { share->rec_per_key = rec_per_key; @@ -2448,7 
+2464,7 @@ static int free_share(BDB_SHARE *share, TABLE *table, uint hidden_primary_key, bool mutex_is_locked) { int error, result = 0; - uint keys=table->keys + test(hidden_primary_key); + uint keys= table->s->keys + test(hidden_primary_key); pthread_mutex_lock(&bdb_mutex); if (mutex_is_locked) pthread_mutex_unlock(&share->mutex); /* purecov: inspected */ @@ -2512,8 +2528,8 @@ void ha_berkeley::get_status() } if (!(share->status & STATUS_ROW_COUNT_INIT) && share->status_block) { - share->org_rows=share->rows= - table->max_rows ? table->max_rows : HA_BERKELEY_MAX_ROWS; + share->org_rows= share->rows= + table->s->max_rows ? table->s->max_rows : HA_BERKELEY_MAX_ROWS; if (!share->status_block->cursor(share->status_block, 0, &cursor, 0)) { DBT row; @@ -2528,9 +2544,10 @@ void ha_berkeley::get_status() uint i; uchar *pos=(uchar*) row.data; share->org_rows=share->rows=uint4korr(pos); pos+=4; - for (i=0 ; i < table->keys ; i++) + for (i=0 ; i < table->s->keys ; i++) { - share->rec_per_key[i]=uint4korr(pos); pos+=4; + share->rec_per_key[i]=uint4korr(pos); + pos+=4; } } cursor->c_close(cursor); @@ -2588,7 +2605,7 @@ static void update_status(BDB_SHARE *share, TABLE *table) { char rec_buff[4+MAX_KEY*4], *pos=rec_buff; int4store(pos,share->rows); pos+=4; - for (uint i=0 ; i < table->keys ; i++) + for (uint i=0 ; i < table->s->keys ; i++) { int4store(pos,share->rec_per_key[i]); pos+=4; } @@ -2622,7 +2639,7 @@ int ha_berkeley::cmp_ref(const byte *ref1, const byte *ref2) int result; Field *field; - KEY *key_info=table->key_info+table->primary_key; + KEY *key_info=table->key_info+table->s->primary_key; KEY_PART_INFO *key_part=key_info->key_part; KEY_PART_INFO *end=key_part+key_info->key_parts; diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index 3118833a47e..abd33f2eaef 100755 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -370,18 +370,18 @@ static byte* federated_get_key(FEDERATED_SHARE *share,uint *length, } /* - Parse connection info from table->comment + 
Parse connection info from table->s->comment SYNOPSIS parse_url() share pointer to FEDERATED share table pointer to current TABLE class - + DESCRIPTION populates the share with information about the connection to the remote database that will serve as the data source. This string must be specified (currently) in the "comment" field, - listed in the CREATE TABLE statement. + listed in the CREATE TABLE statement. This string MUST be in the format of any of these: @@ -401,10 +401,10 @@ scheme://username:password@hostname/database/table RETURN VALUE 0 success - -1 failure, wrong string format + -1 failure, wrong string format */ -int parse_url(FEDERATED_SHARE *share, TABLE *table, uint table_create_flag) +static int parse_url(FEDERATED_SHARE *share, TABLE *table, uint table_create_flag) { DBUG_ENTER("ha_federated::parse_url"); @@ -412,7 +412,7 @@ int parse_url(FEDERATED_SHARE *share, TABLE *table, uint table_create_flag) share->port= 0; uint error_num= table_create_flag ? ER_CANT_CREATE_TABLE : ER_CONNECT_TO_MASTER ; - share->scheme= my_strdup(table->comment, MYF(0)); + share->scheme= my_strdup(table->s->comment, MYF(0)); if (share->username= strstr(share->scheme, "://")) @@ -429,7 +429,7 @@ int parse_url(FEDERATED_SHARE *share, TABLE *table, uint table_create_flag) } share->username+= 3; - if (share->hostname= strchr(share->username, '@')) + if (share->hostname= strchr(share->username, '@')) { share->username[share->hostname - share->username]= '\0'; share->hostname++; @@ -569,7 +569,7 @@ uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row) DBUG_ENTER("ha_federated::convert_row_to_internal_format"); // Question this - memset(record, 0, table->null_bytes); + memset(record, 0, table->s->null_bytes); for (Field **field=table->field; *field ; field++, x++) { @@ -742,8 +742,6 @@ int load_conn_info(FEDERATED_SHARE *share, TABLE *table) static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) { FEDERATED_SHARE *share; - // FIX : need 
to redo - //String query; char query_buffer[IO_SIZE]; String query(query_buffer, sizeof(query_buffer), &my_charset_bin); query.length(0); @@ -753,7 +751,7 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) // share->table_name has the file location - we want the actual table's // name! - table_base_name= table->table_name; + table_base_name= (char *)table->s->table_name; DBUG_PRINT("ha_federated::get_share",("table_name %s", table_base_name)); /* So why does this exist? There is no way currently to init a storage engine. @@ -1160,9 +1158,9 @@ int ha_federated::update_row( DBUG_ENTER("ha_federated::update_row"); - has_a_primary_key= table->primary_key == 0 ? 1 : 0; + has_a_primary_key= table->s->primary_key == 0 ? 1 : 0; primary_key_field_num= has_a_primary_key ? - table->key_info[table->primary_key].key_part->fieldnr -1 : -1; + table->key_info[table->s->primary_key].key_part->fieldnr -1 : -1; if (has_a_primary_key) DBUG_PRINT("ha_federated::update_row", ("has a primary key")); @@ -1243,7 +1241,7 @@ int ha_federated::update_row( update_string.append(new_field_value); new_field_value.length(0); - if (x+1 < table->fields) + if (x+1 < table->s->fields) { update_string.append(", "); if (! 
has_a_primary_key) @@ -1319,7 +1317,7 @@ int ha_federated::delete_row(const byte * buf) delete_string.append(data_string); data_string.length(0); - if (x+1 < table->fields) + if (x+1 < table->s->fields) delete_string.append(" AND "); } @@ -1422,7 +1420,7 @@ int ha_federated::index_init(uint keynr) int error; DBUG_ENTER("ha_federated::index_init"); DBUG_PRINT("ha_federated::index_init", - ("table: '%s' key: %d", table->real_name, keynr)); + ("table: '%s' key: %d", table->s->table_name, keynr)); active_index= keynr; DBUG_RETURN(0); } @@ -1522,7 +1520,7 @@ void ha_federated::position(const byte *record) { DBUG_ENTER("ha_federated::position"); //ha_store_ptr Add seek storage - ha_store_ptr(ref, ref_length, current_position); + *(MYSQL_ROW_OFFSET *)ref=current_position; // ref is always aligned DBUG_VOID_RETURN; } @@ -1541,7 +1539,7 @@ int ha_federated::rnd_pos(byte * buf, byte *pos) { DBUG_ENTER("ha_federated::rnd_pos"); statistic_increment(table->in_use->status_var.ha_read_rnd_count,&LOCK_status); - current_position= ha_get_ptr(pos,ref_length); + memcpy(current_position, pos, sizeof(MYSQL_ROW_OFFSET)); // pos is not aligned result->current_row= 0; result->data_cursor= current_position; DBUG_RETURN(rnd_next(buf)); @@ -1706,10 +1704,10 @@ THR_LOCK_DATA **ha_federated::store_lock(THD *thd, int ha_federated::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info) { - DBUG_ENTER("ha_federated::create"); int retcode; - FEDERATED_SHARE *tmp; - retcode= parse_url(tmp, table_arg, 1); + FEDERATED_SHARE tmp; + DBUG_ENTER("ha_federated::create"); + retcode= parse_url(&tmp, table_arg, 1); if (retcode < 0) { DBUG_PRINT("ha_federated::create", diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc index 1556a18bfca..b5742699d7e 100644 --- a/sql/ha_heap.cc +++ b/sql/ha_heap.cc @@ -60,7 +60,7 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked) { /* Initialize variables for the opened table */ set_keys_for_scanning(); - if (table->tmp_table == NO_TMP_TABLE) + if 
(table->s->tmp_table == NO_TMP_TABLE) update_key_stats(); } return (file ? 0 : 1); @@ -91,7 +91,7 @@ int ha_heap::close(void) void ha_heap::set_keys_for_scanning(void) { btree_keys.clear_all(); - for (uint i= 0 ; i < table->keys ; i++) + for (uint i= 0 ; i < table->s->keys ; i++) { if (table->key_info[i].algorithm == HA_KEY_ALG_BTREE) btree_keys.set_bit(i); @@ -101,7 +101,7 @@ void ha_heap::set_keys_for_scanning(void) void ha_heap::update_key_stats() { - for (uint i= 0; i < table->keys; i++) + for (uint i= 0; i < table->s->keys; i++) { KEY *key=table->key_info+i; if (key->algorithm != HA_KEY_ALG_BTREE) @@ -124,7 +124,7 @@ int ha_heap::write_row(byte * buf) if (table->next_number_field && buf == table->record[0]) update_auto_increment(); res= heap_write(file,buf); - if (!res && table->tmp_table == NO_TMP_TABLE && + if (!res && table->s->tmp_table == NO_TMP_TABLE && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records) update_key_stats(); return res; @@ -137,7 +137,7 @@ int ha_heap::update_row(const byte * old_data, byte * new_data) if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); res= heap_update(file,old_data,new_data); - if (!res && table->tmp_table == NO_TMP_TABLE && + if (!res && table->s->tmp_table == NO_TMP_TABLE && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records) update_key_stats(); return res; @@ -148,7 +148,7 @@ int ha_heap::delete_row(const byte * buf) int res; statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); res= heap_delete(file,buf); - if (!res && table->tmp_table == NO_TMP_TABLE && + if (!res && table->s->tmp_table == NO_TMP_TABLE && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records) update_key_stats(); return res; @@ -282,7 +282,7 @@ int ha_heap::extra(enum ha_extra_function operation) int ha_heap::delete_all_rows() { heap_clear(file); - if (table->tmp_table == NO_TMP_TABLE) + if (table->s->tmp_table == NO_TMP_TABLE) 
update_key_stats(); return 0; } @@ -448,23 +448,24 @@ ha_rows ha_heap::records_in_range(uint inx, key_range *min_key, int ha_heap::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info) { - uint key, parts, mem_per_row= 0; + uint key, parts, mem_per_row= 0, keys= table_arg->s->keys; uint auto_key= 0, auto_key_type= 0; ha_rows max_rows; HP_KEYDEF *keydef; HA_KEYSEG *seg; char buff[FN_REFLEN]; int error; + TABLE_SHARE *share= table_arg->s; - for (key= parts= 0; key < table_arg->keys; key++) + for (key= parts= 0; key < keys; key++) parts+= table_arg->key_info[key].key_parts; - if (!(keydef= (HP_KEYDEF*) my_malloc(table_arg->keys * sizeof(HP_KEYDEF) + + if (!(keydef= (HP_KEYDEF*) my_malloc(keys * sizeof(HP_KEYDEF) + parts * sizeof(HA_KEYSEG), MYF(MY_WME)))) return my_errno; - seg= my_reinterpret_cast(HA_KEYSEG*) (keydef + table_arg->keys); - for (key= 0; key < table_arg->keys; key++) + seg= my_reinterpret_cast(HA_KEYSEG*) (keydef + keys); + for (key= 0; key < keys; key++) { KEY *pos= table_arg->key_info+key; KEY_PART_INFO *key_part= pos->key_part; @@ -516,7 +517,7 @@ int ha_heap::create(const char *name, TABLE *table_arg, } } } - mem_per_row+= MY_ALIGN(table_arg->reclength + 1, sizeof(char*)); + mem_per_row+= MY_ALIGN(share->reclength + 1, sizeof(char*)); max_rows = (ha_rows) (table->in_use->variables.max_heap_table_size / mem_per_row); HP_CREATE_INFO hp_create_info; @@ -525,11 +526,11 @@ int ha_heap::create(const char *name, TABLE *table_arg, hp_create_info.auto_increment= (create_info->auto_increment_value ? create_info->auto_increment_value - 1 : 0); error= heap_create(fn_format(buff,name,"","",4+2), - table_arg->keys,keydef, table_arg->reclength, - (ulong) ((table_arg->max_rows < max_rows && - table_arg->max_rows) ? - table_arg->max_rows : max_rows), - (ulong) table_arg->min_rows, &hp_create_info); + keys,keydef, share->reclength, + (ulong) ((share->max_rows < max_rows && + share->max_rows) ? 
+ share->max_rows : max_rows), + (ulong) share->min_rows, &hp_create_info); my_free((gptr) keydef, MYF(0)); if (file) info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE); diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 91517770d04..31b15f89806 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -1768,7 +1768,7 @@ ha_innobase::open( fields when packed actually became 1 byte longer, when we also stored the string length as the first byte. */ - upd_and_key_val_buff_len = table->reclength + table->max_key_length + upd_and_key_val_buff_len = table->s->reclength + table->s->max_key_length + MAX_REF_PARTS * 3; if (!(mysql_byte*) my_multi_malloc(MYF(MY_WME), &upd_buff, upd_and_key_val_buff_len, @@ -1820,11 +1820,11 @@ ha_innobase::open( innobase_prebuilt = row_create_prebuilt(ib_table); - ((row_prebuilt_t*)innobase_prebuilt)->mysql_row_len = table->reclength; + ((row_prebuilt_t*)innobase_prebuilt)->mysql_row_len = table->s->reclength; /* Looks like MySQL-3.23 sometimes has primary key number != 0 */ - primary_key = table->primary_key; + primary_key = table->s->primary_key; key_used_on_scan = primary_key; /* Allocate a buffer for a 'row reference'. 
A row reference is @@ -1995,7 +1995,7 @@ reset_null_bits( TABLE* table, /* in: MySQL table object */ char* record) /* in: a row in MySQL format */ { - bzero(record, table->null_bytes); + bzero(record, table->s->null_bytes); } extern "C" { @@ -2349,7 +2349,7 @@ build_template( the clustered index */ } - n_fields = (ulint)table->fields; /* number of columns */ + n_fields = (ulint)table->s->fields; /* number of columns */ if (!prebuilt->mysql_template) { prebuilt->mysql_template = (mysql_row_templ_t*) @@ -2358,7 +2358,7 @@ build_template( } prebuilt->template_type = templ_type; - prebuilt->null_bitmap_len = table->null_bytes; + prebuilt->null_bitmap_len = table->s->null_bytes; prebuilt->templ_contains_blob = FALSE; @@ -2725,7 +2725,7 @@ calc_row_difference( ulint n_changed = 0; uint i; - n_fields = table->fields; + n_fields = table->s->fields; /* We use upd_buff to convert changed fields */ buf = (byte*) upd_buff; @@ -3215,7 +3215,7 @@ ha_innobase::change_active_index( active_index = keynr; - if (keynr != MAX_KEY && table->keys > 0) { + if (keynr != MAX_KEY && table->s->keys > 0) { key = table->key_info + active_index; prebuilt->index = dict_table_get_index_noninline( @@ -3635,7 +3635,7 @@ create_table_def( DBUG_ENTER("create_table_def"); DBUG_PRINT("enter", ("table_name: %s", table_name)); - n_cols = form->fields; + n_cols = form->s->fields; /* We pass 0 as the space id, and determine at a lower level the space id where to store the table */ @@ -3727,7 +3727,7 @@ create_index( ind_type = 0; - if (key_num == form->primary_key) { + if (key_num == form->s->primary_key) { ind_type = ind_type | DICT_CLUSTERED; } @@ -3750,7 +3750,7 @@ create_index( the length of the key part versus the column. 
*/ field = NULL; - for (j = 0; j < form->fields; j++) { + for (j = 0; j < form->s->fields; j++) { field = form->field[j]; @@ -3763,7 +3763,7 @@ create_index( } } - ut_a(j < form->fields); + ut_a(j < form->s->fields); col_type = get_innobase_type_from_mysql_type(key_part->field); @@ -3857,7 +3857,7 @@ ha_innobase::create( DBUG_ASSERT(thd != NULL); - if (form->fields > 1000) { + if (form->s->fields > 1000) { /* The limit probably should be REC_MAX_N_FIELDS - 3 = 1020, but we play safe here */ @@ -3907,7 +3907,7 @@ ha_innobase::create( error = create_table_def(trx, form, norm_name, create_info->options & HA_LEX_CREATE_TMP_TABLE ? name2 : NULL, - !(form->db_options_in_use & HA_OPTION_PACK_RECORD)); + !(form->s->db_options_in_use & HA_OPTION_PACK_RECORD)); if (error) { innobase_commit_low(trx); @@ -3921,8 +3921,8 @@ ha_innobase::create( /* Look for a primary key */ - primary_key_no= (table->primary_key != MAX_KEY ? - (int) table->primary_key : + primary_key_no= (table->s->primary_key != MAX_KEY ? 
+ (int) table->s->primary_key : -1); /* Our function row_get_mysql_key_number_for_index assumes @@ -3932,7 +3932,7 @@ ha_innobase::create( /* Create the keys */ - if (form->keys == 0 || primary_key_no == -1) { + if (form->s->keys == 0 || primary_key_no == -1) { /* Create an index which is used as the clustered index; order the rows by their row id which is internally generated by InnoDB */ @@ -3965,7 +3965,7 @@ ha_innobase::create( } } - for (i = 0; i < form->keys; i++) { + for (i = 0; i < form->s->keys; i++) { if (i != (uint) primary_key_no) { @@ -4330,11 +4330,11 @@ ha_innobase::records_in_range( KEY* key; dict_index_t* index; mysql_byte* key_val_buff2 = (mysql_byte*) my_malloc( - table->reclength - + table->max_key_length + 100, + table->s->reclength + + table->s->max_key_length + 100, MYF(MY_WME)); - ulint buff2_len = table->reclength - + table->max_key_length + 100; + ulint buff2_len = table->s->reclength + + table->s->max_key_length + 100; dtuple_t* range_start; dtuple_t* range_end; ib_longlong n_rows; @@ -4491,7 +4491,7 @@ ha_innobase::read_time( ha_rows total_rows; double time_for_scan; - if (index != table->primary_key) + if (index != table->s->primary_key) return handler::read_time(index, ranges, rows); // Not clustered if (rows <= 2) @@ -4610,7 +4610,7 @@ ha_innobase::info( index = dict_table_get_next_index_noninline(index); } - for (i = 0; i < table->keys; i++) { + for (i = 0; i < table->s->keys; i++) { if (index == NULL) { ut_print_timestamp(stderr); fprintf(stderr, @@ -5803,7 +5803,7 @@ ha_innobase::innobase_read_and_init_auto_inc( } (void) extra(HA_EXTRA_KEYREAD); - index_init(table->next_number_index); + index_init(table->s->next_number_index); /* We use an exclusive lock when we read the max key value from the auto-increment column index. 
This is because then build_template will @@ -5838,7 +5838,7 @@ ha_innobase::innobase_read_and_init_auto_inc( } else { /* Initialize to max(col) + 1 */ auto_inc = (longlong) table->next_number_field-> - val_int_offset(table->rec_buff_length) + 1; + val_int_offset(table->s->rec_buff_length) + 1; } dict_table_autoinc_initialize(prebuilt->table, auto_inc); @@ -5925,9 +5925,9 @@ ha_innobase::cmp_ref( /* Do type-aware comparison of Primary Key members. PK members are always NOT NULL, so no checks for NULL are performed */ - KEY_PART_INFO *key_part= table->key_info[table->primary_key].key_part; + KEY_PART_INFO *key_part= table->key_info[table->s->primary_key].key_part; KEY_PART_INFO *key_part_end= - key_part + table->key_info[table->primary_key].key_parts; + key_part + table->key_info[table->s->primary_key].key_parts; for (; key_part != key_part_end; ++key_part) { field = key_part->field; mysql_type = field->type(); diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index c89eb4426ff..87329c6f4af 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -233,7 +233,7 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked) info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED)) VOID(mi_extra(file, HA_EXTRA_WAIT_LOCK, 0)); - if (!table->db_record_offset) + if (!table->s->db_record_offset) int_table_flags|=HA_REC_NOT_IN_SEQ; if (file->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD)) int_table_flags|=HA_HAS_CHECKSUM; @@ -275,9 +275,9 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt) thd->proc_info="Checking table"; myisamchk_init(¶m); param.thd = thd; - param.op_name = (char*)"check"; - param.db_name = table->table_cache_key; - param.table_name = table->table_name; + param.op_name = "check"; + param.db_name= table->s->db; + param.table_name= table->alias; param.testflag = check_opt->flags | T_CHECK | T_SILENT; if (!(table->db_stat & HA_READ_ONLY)) @@ -362,11 +362,11 @@ int 
ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt) myisamchk_init(¶m); param.thd = thd; - param.op_name = (char*) "analyze"; - param.db_name = table->table_cache_key; - param.table_name = table->table_name; - param.testflag=(T_FAST | T_CHECK | T_SILENT | T_STATISTICS | - T_DONT_CHECK_CHECKSUM); + param.op_name= "analyze"; + param.db_name= table->s->db; + param.table_name= table->alias; + param.testflag= (T_FAST | T_CHECK | T_SILENT | T_STATISTICS | + T_DONT_CHECK_CHECKSUM); param.using_global_keycache = 1; if (!(share->state.changed & STATE_NOT_ANALYZED)) @@ -388,9 +388,9 @@ int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt) int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt) { HA_CHECK_OPT tmp_check_opt; - char* backup_dir= thd->lex->backup_dir; + char *backup_dir= thd->lex->backup_dir; char src_path[FN_REFLEN], dst_path[FN_REFLEN]; - char* table_name = table->real_name; + const char *table_name= table->s->table_name; int error; const char* errmsg; DBUG_ENTER("restore"); @@ -399,11 +399,11 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt) MI_NAME_DEXT)) DBUG_RETURN(HA_ADMIN_INVALID); - if (my_copy(src_path, fn_format(dst_path, table->path, "", + if (my_copy(src_path, fn_format(dst_path, table->s->path, "", MI_NAME_DEXT, 4), MYF(MY_WME))) { - error = HA_ADMIN_FAILED; - errmsg = "Failed in my_copy (Error %d)"; + error= HA_ADMIN_FAILED; + errmsg= "Failed in my_copy (Error %d)"; goto err; } @@ -415,11 +415,11 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt) { MI_CHECK param; myisamchk_init(¶m); - param.thd = thd; - param.op_name = (char*)"restore"; - param.db_name = table->table_cache_key; - param.table_name = table->table_name; - param.testflag = 0; + param.thd= thd; + param.op_name= "restore"; + param.db_name= table->s->db; + param.table_name= table->s->table_name; + param.testflag= 0; mi_check_print_error(¶m, errmsg, my_errno); DBUG_RETURN(error); } @@ -428,9 +428,9 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt) 
int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt) { - char* backup_dir= thd->lex->backup_dir; + char *backup_dir= thd->lex->backup_dir; char src_path[FN_REFLEN], dst_path[FN_REFLEN]; - char* table_name = table->real_name; + const char *table_name= table->s->table_name; int error; const char *errmsg; DBUG_ENTER("ha_myisam::backup"); @@ -438,12 +438,13 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt) if (fn_format_relative_to_data_home(dst_path, table_name, backup_dir, reg_ext)) { - errmsg = "Failed in fn_format() for .frm file (errno: %d)"; - error = HA_ADMIN_INVALID; + errmsg= "Failed in fn_format() for .frm file (errno: %d)"; + error= HA_ADMIN_INVALID; goto err; } - if (my_copy(fn_format(src_path, table->path,"", reg_ext, MY_UNPACK_FILENAME), + if (my_copy(fn_format(src_path, table->s->path, "", reg_ext, + MY_UNPACK_FILENAME), dst_path, MYF(MY_WME | MY_HOLD_ORIGINAL_MODES | MY_DONT_OVERWRITE_FILE))) { @@ -461,7 +462,7 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt) goto err; } - if (my_copy(fn_format(src_path, table->path,"", MI_NAME_DEXT, + if (my_copy(fn_format(src_path, table->s->path, "", MI_NAME_DEXT, MY_UNPACK_FILENAME), dst_path, MYF(MY_WME | MY_HOLD_ORIGINAL_MODES | MY_DONT_OVERWRITE_FILE))) @@ -476,11 +477,11 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt) { MI_CHECK param; myisamchk_init(¶m); - param.thd = thd; - param.op_name = (char*)"backup"; - param.db_name = table->table_cache_key; - param.table_name = table->table_name; - param.testflag = 0; + param.thd= thd; + param.op_name= "backup"; + param.db_name= table->s->db; + param.table_name= table->s->table_name; + param.testflag = 0; mi_check_print_error(¶m,errmsg, my_errno); DBUG_RETURN(error); } @@ -497,10 +498,10 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) myisamchk_init(¶m); param.thd = thd; - param.op_name = (char*) "repair"; - param.testflag = ((check_opt->flags & ~(T_EXTEND)) | - T_SILENT | T_FORCE_CREATE | T_CALC_CHECKSUM | - 
(check_opt->flags & T_EXTEND ? T_REP : T_REP_BY_SORT)); + param.op_name= "repair"; + param.testflag= ((check_opt->flags & ~(T_EXTEND)) | + T_SILENT | T_FORCE_CREATE | T_CALC_CHECKSUM | + (check_opt->flags & T_EXTEND ? T_REP : T_REP_BY_SORT)); param.sort_buffer_length= check_opt->sort_buffer_size; start_records=file->state->records; while ((error=repair(thd,param,0)) && param.retry_repair) @@ -511,7 +512,7 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) { param.testflag&= ~T_RETRY_WITHOUT_QUICK; sql_print_information("Retrying repair of: '%s' without quick", - table->path); + table->s->path); continue; } param.testflag&= ~T_QUICK; @@ -519,7 +520,7 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) { param.testflag= (param.testflag & ~T_REP_BY_SORT) | T_REP; sql_print_information("Retrying repair of: '%s' with keycache", - table->path); + table->s->path); continue; } break; @@ -531,7 +532,7 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) sql_print_information("Found %s of %s rows when repairing '%s'", llstr(file->state->records, llbuff), llstr(start_records, llbuff2), - table->path); + table->s->path); } return error; } @@ -544,9 +545,9 @@ int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt) myisamchk_init(¶m); param.thd = thd; - param.op_name = (char*) "optimize"; - param.testflag = (check_opt->flags | T_SILENT | T_FORCE_CREATE | - T_REP_BY_SORT | T_STATISTICS | T_SORT_INDEX); + param.op_name= "optimize"; + param.testflag= (check_opt->flags | T_SILENT | T_FORCE_CREATE | + T_REP_BY_SORT | T_STATISTICS | T_SORT_INDEX); param.sort_buffer_length= check_opt->sort_buffer_size; if ((error= repair(thd,param,1)) && param.retry_repair) { @@ -570,18 +571,18 @@ int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool optimize) ha_rows rows= file->state->records; DBUG_ENTER("ha_myisam::repair"); - param.db_name = table->table_cache_key; - param.table_name = table->table_name; + param.db_name= table->s->db; + param.table_name= table->alias; 
param.tmpfile_createflag = O_RDWR | O_TRUNC; param.using_global_keycache = 1; - param.thd=thd; - param.tmpdir=&mysql_tmpdir_list; - param.out_flag=0; + param.thd= thd; + param.tmpdir= &mysql_tmpdir_list; + param.out_flag= 0; strmov(fixed_name,file->filename); // Don't lock tables if we have used LOCK TABLE if (!thd->locked_tables && - mi_lock_database(file, table->tmp_table ? F_EXTRA_LCK : F_WRLCK)) + mi_lock_database(file, table->s->tmp_table ? F_EXTRA_LCK : F_WRLCK)) { mi_check_print_error(¶m,ER(ER_CANT_LOCK),my_errno); DBUG_RETURN(HA_ADMIN_FAILED); @@ -740,9 +741,9 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt) MI_CHECK param; myisamchk_init(¶m); param.thd= thd; - param.op_name= (char*)"assign_to_keycache"; - param.db_name= table->table_cache_key; - param.table_name= table->table_name; + param.op_name= "assign_to_keycache"; + param.db_name= table->s->db; + param.table_name= table->s->table_name; param.testflag= 0; mi_check_print_error(¶m, errmsg); } @@ -808,10 +809,10 @@ int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt) MI_CHECK param; myisamchk_init(¶m); param.thd= thd; - param.op_name= (char*)"preload_keys"; - param.db_name= table->table_cache_key; - param.table_name= table->table_name; - param.testflag= 0; + param.op_name= "preload_keys"; + param.db_name= table->s->db; + param.table_name= table->s->table_name; + param.testflag= 0; mi_check_print_error(¶m, errmsg); DBUG_RETURN(error); } @@ -916,9 +917,9 @@ int ha_myisam::enable_indexes(uint mode) const char *save_proc_info=thd->proc_info; thd->proc_info="Creating index"; myisamchk_init(¶m); - param.op_name = (char*) "recreating_index"; - param.testflag = (T_SILENT | T_REP_BY_SORT | T_QUICK | - T_CREATE_MISSING_KEYS); + param.op_name= "recreating_index"; + param.testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK | + T_CREATE_MISSING_KEYS); param.myf_rw&= ~MY_WAIT_IF_FULL; param.sort_buffer_length= thd->variables.myisam_sort_buff_size; param.tmpdir=&mysql_tmpdir_list; @@ -983,8 
+984,9 @@ int ha_myisam::indexes_are_disabled(void) void ha_myisam::start_bulk_insert(ha_rows rows) { DBUG_ENTER("ha_myisam::start_bulk_insert"); - THD *thd=current_thd; - ulong size= min(thd->variables.read_buff_size, table->avg_row_length*rows); + THD *thd= current_thd; + ulong size= min(thd->variables.read_buff_size, + table->s->avg_row_length*rows); DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu", (ulong) rows, size)); @@ -1052,18 +1054,18 @@ bool ha_myisam::check_and_repair(THD *thd) // Don't use quick if deleted rows if (!file->state->del && (myisam_recover_options & HA_RECOVER_QUICK)) check_opt.flags|=T_QUICK; - sql_print_warning("Checking table: '%s'",table->path); + sql_print_warning("Checking table: '%s'",table->s->path); old_query= thd->query; old_query_length= thd->query_length; pthread_mutex_lock(&LOCK_thread_count); - thd->query= table->real_name; - thd->query_length= strlen(table->real_name); + thd->query= (char*) table->s->table_name; + thd->query_length= strlen(table->s->table_name); pthread_mutex_unlock(&LOCK_thread_count); if ((marked_crashed= mi_is_crashed(file)) || check(thd, &check_opt)) { - sql_print_warning("Recovering table: '%s'",table->path); + sql_print_warning("Recovering table: '%s'",table->s->path); check_opt.flags= ((myisam_recover_options & HA_RECOVER_BACKUP ? T_BACKUP_DATA : 0) | (marked_crashed ? 
0 : T_QUICK) | @@ -1237,25 +1239,25 @@ void ha_myisam::info(uint flag) } if (flag & HA_STATUS_CONST) { - max_data_file_length=info.max_data_file_length; - max_index_file_length=info.max_index_file_length; - create_time = info.create_time; - sortkey = info.sortkey; - ref_length=info.reflength; - table->db_options_in_use = info.options; - block_size=myisam_block_size; - table->keys_in_use.set_prefix(table->keys); - table->keys_in_use.intersect(info.key_map); - table->keys_for_keyread= table->keys_in_use; - table->keys_for_keyread.subtract(table->read_only_keys); - table->db_record_offset=info.record_offset; - if (table->key_parts) + TABLE_SHARE *share= table->s; + max_data_file_length= info.max_data_file_length; + max_index_file_length= info.max_index_file_length; + create_time= info.create_time; + sortkey= info.sortkey; + ref_length= info.reflength; + share->db_options_in_use= info.options; + block_size= myisam_block_size; + share->keys_in_use.set_prefix(share->keys); + share->keys_in_use.intersect(info.key_map); + share->keys_for_keyread.intersect(share->keys_in_use); + share->db_record_offset= info.record_offset; + if (share->key_parts) memcpy((char*) table->key_info[0].rec_per_key, (char*) info.rec_per_key, - sizeof(table->key_info[0].rec_per_key)*table->key_parts); - raid_type=info.raid_type; - raid_chunks=info.raid_chunks; - raid_chunksize=info.raid_chunksize; + sizeof(table->key_info[0].rec_per_key)*share->key_parts); + raid_type= info.raid_type; + raid_chunks= info.raid_chunks; + raid_chunksize= info.raid_chunksize; /* Set data_file_name and index_file_name to point at the symlink value @@ -1311,7 +1313,7 @@ int ha_myisam::delete_table(const char *name) int ha_myisam::external_lock(THD *thd, int lock_type) { - return mi_lock_database(file, !table->tmp_table ? + return mi_lock_database(file, !table->s->tmp_table ? lock_type : ((lock_type == F_UNLCK) ? 
F_UNLCK : F_EXTRA_LCK)); } @@ -1356,21 +1358,23 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, MI_KEYDEF *keydef; MI_COLUMNDEF *recinfo,*recinfo_pos; HA_KEYSEG *keyseg; - uint options=table_arg->db_options_in_use; + TABLE_SHARE *share= table->s; + uint options= share->db_options_in_use; DBUG_ENTER("ha_myisam::create"); type=HA_KEYTYPE_BINARY; // Keep compiler happy if (!(my_multi_malloc(MYF(MY_WME), - &recinfo,(table_arg->fields*2+2)*sizeof(MI_COLUMNDEF), - &keydef, table_arg->keys*sizeof(MI_KEYDEF), + &recinfo,(share->fields*2+2)* + sizeof(MI_COLUMNDEF), + &keydef, share->keys*sizeof(MI_KEYDEF), &keyseg, - ((table_arg->key_parts + table_arg->keys) * + ((share->key_parts + share->keys) * sizeof(HA_KEYSEG)), NullS))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); pos=table_arg->key_info; - for (i=0; i < table_arg->keys ; i++, pos++) + for (i=0; i < share->keys ; i++, pos++) { keydef[i].flag= (pos->flags & (HA_NOSAME | HA_FULLTEXT | HA_SPATIAL)); keydef[i].key_alg= pos->algorithm == HA_KEY_ALG_UNDEF ? 
@@ -1431,7 +1435,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, keydef[i].seg[j].flag|=HA_BLOB_PART; /* save number of bytes used to pack length */ keydef[i].seg[j].bit_start= (uint) (field->pack_length() - - table_arg->blob_ptr_size); + share->blob_ptr_size); } else if (field->type() == FIELD_TYPE_BIT) { @@ -1446,15 +1450,16 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, if (table_arg->found_next_number_field) { - keydef[table_arg->next_number_index].flag|= HA_AUTO_KEY; - found_real_auto_increment= table_arg->next_number_key_offset == 0; + keydef[share->next_number_index].flag|= HA_AUTO_KEY; + found_real_auto_increment= share->next_number_key_offset == 0; } recpos=0; recinfo_pos=recinfo; - while (recpos < (uint) table_arg->reclength) + while (recpos < (uint) share->reclength) { Field **field,*found=0; - minpos=table_arg->reclength; length=0; + minpos= share->reclength; + length=0; for (field=table_arg->field ; *field ; field++) { @@ -1517,25 +1522,25 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, } MI_CREATE_INFO create_info; bzero((char*) &create_info,sizeof(create_info)); - create_info.max_rows=table_arg->max_rows; - create_info.reloc_rows=table_arg->min_rows; + create_info.max_rows= share->max_rows; + create_info.reloc_rows= share->min_rows; create_info.with_auto_increment=found_real_auto_increment; create_info.auto_increment=(info->auto_increment_value ? info->auto_increment_value -1 : (ulonglong) 0); - create_info.data_file_length= ((ulonglong) table_arg->max_rows * - table_arg->avg_row_length); + create_info.data_file_length= ((ulonglong) share->max_rows * + share->avg_row_length); create_info.raid_type=info->raid_type; create_info.raid_chunks= (info->raid_chunks ? info->raid_chunks : RAID_DEFAULT_CHUNKS); - create_info.raid_chunksize=(info->raid_chunksize ? 
info->raid_chunksize : - RAID_DEFAULT_CHUNKSIZE); - create_info.data_file_name= info->data_file_name; - create_info.index_file_name=info->index_file_name; + create_info.raid_chunksize= (info->raid_chunksize ? info->raid_chunksize : + RAID_DEFAULT_CHUNKSIZE); + create_info.data_file_name= info->data_file_name; + create_info.index_file_name= info->index_file_name; /* TODO: Check that the following fn_format is really needed */ error=mi_create(fn_format(buff,name,"","",2+4), - table_arg->keys,keydef, + share->keys,keydef, (uint) (recinfo_pos-recinfo), recinfo, 0, (MI_UNIQUEDEF*) 0, &create_info, @@ -1562,26 +1567,29 @@ ulonglong ha_myisam::get_auto_increment() int error; byte key[MI_MAX_KEY_LENGTH]; - if (!table->next_number_key_offset) + if (!table->s->next_number_key_offset) { // Autoincrement at key-start ha_myisam::info(HA_STATUS_AUTO); return auto_increment_value; } /* it's safe to call the following if bulk_insert isn't on */ - mi_flush_bulk_insert(file, table->next_number_index); + mi_flush_bulk_insert(file, table->s->next_number_index); (void) extra(HA_EXTRA_KEYREAD); key_copy(key, table->record[0], - table->key_info + table->next_number_index, - table->next_number_key_offset); - error=mi_rkey(file,table->record[1],(int) table->next_number_index, - key,table->next_number_key_offset,HA_READ_PREFIX_LAST); + table->key_info + table->s->next_number_index, + table->s->next_number_key_offset); + error= mi_rkey(file,table->record[1],(int) table->s->next_number_index, + key,table->s->next_number_key_offset,HA_READ_PREFIX_LAST); if (error) nr= 1; else + { + /* Get data from record[1] */ nr= ((ulonglong) table->next_number_field-> - val_int_offset(table->rec_buff_length)+1); + val_int_offset(table->s->rec_buff_length)+1); + } extra(HA_EXTRA_NO_KEYREAD); return nr; } diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc index 744128faf69..4cd39660728 100644 --- a/sql/ha_myisammrg.cc +++ b/sql/ha_myisammrg.cc @@ -67,15 +67,15 @@ int ha_myisammrg::open(const char *name, 
int mode, uint test_if_locked) if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED)) myrg_extra(file,HA_EXTRA_WAIT_LOCK,0); - if (table->reclength != mean_rec_length && mean_rec_length) + if (table->s->reclength != mean_rec_length && mean_rec_length) { DBUG_PRINT("error",("reclength: %d mean_rec_length: %d", - table->reclength, mean_rec_length)); + table->s->reclength, mean_rec_length)); goto err; } #if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4 /* Merge table has more than 2G rows */ - if (table->crashed) + if (table->s->crashed) goto err; #endif return (0); @@ -241,14 +241,14 @@ void ha_myisammrg::info(uint flag) #if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4 if ((info.records >= (ulonglong) 1 << 32) || (info.deleted >= (ulonglong) 1 << 32)) - table->crashed=1; + table->s->crashed= 1; #endif data_file_length=info.data_file_length; errkey = info.errkey; - table->keys_in_use.set_prefix(table->keys); - table->db_options_in_use = info.options; - table->is_view=1; - mean_rec_length=info.reclength; + table->s->keys_in_use.set_prefix(table->s->keys); + table->s->db_options_in_use= info.options; + table->s->is_view= 1; + mean_rec_length= info.reclength; block_size=0; update_time=0; #if SIZEOF_OFF_T > 4 @@ -258,10 +258,10 @@ void ha_myisammrg::info(uint flag) #endif if (flag & HA_STATUS_CONST) { - if (table->key_parts && info.rec_per_key) + if (table->s->key_parts && info.rec_per_key) memcpy((char*) table->key_info[0].rec_per_key, (char*) info.rec_per_key, - sizeof(table->key_info[0].rec_per_key)*table->key_parts); + sizeof(table->key_info[0].rec_per_key)*table->s->key_parts); } } @@ -361,7 +361,7 @@ void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info) if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST)))) goto err; split_file_name(open_table->table->filename, &db, &name); - if (!(ptr->real_name= thd->strmake(name.str, name.length))) + if (!(ptr->table_name= thd->strmake(name.str, name.length))) goto err; if (db.length && !(ptr->db= thd->strmake(db.str, 
db.length))) goto err; @@ -388,35 +388,36 @@ err: int ha_myisammrg::create(const char *name, register TABLE *form, HA_CREATE_INFO *create_info) { - char buff[FN_REFLEN],**table_names,**pos; + char buff[FN_REFLEN]; + const char **table_names, **pos; TABLE_LIST *tables= (TABLE_LIST*) create_info->merge_list.first; THD *thd= current_thd; DBUG_ENTER("ha_myisammrg::create"); - if (!(table_names= (char**) thd->alloc((create_info->merge_list.elements+1)* - sizeof(char*)))) + if (!(table_names= (const char**) + thd->alloc((create_info->merge_list.elements+1) * sizeof(char*)))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); for (pos= table_names; tables; tables= tables->next_local) { - char *table_name; + const char *table_name; TABLE **tbl= 0; if (create_info->options & HA_LEX_CREATE_TMP_TABLE) - tbl= find_temporary_table(thd, tables->db, tables->real_name); + tbl= find_temporary_table(thd, tables->db, tables->table_name); if (!tbl) { uint length= my_snprintf(buff,FN_REFLEN,"%s%s/%s", mysql_real_data_home, - tables->db, tables->real_name); + tables->db, tables->table_name); if (!(table_name= thd->strmake(buff, length))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); } else - table_name=(*tbl)->path; + table_name= (*tbl)->s->path; *pos++= table_name; } *pos=0; DBUG_RETURN(myrg_create(fn_format(buff,name,"","",2+4+16), - (const char **) table_names, + table_names, create_info->merge_insert_method, (my_bool) 0)); } @@ -436,7 +437,7 @@ void ha_myisammrg::append_create_info(String *packet) packet->append(" UNION=(",8); MYRG_TABLE *open_table,*first; - current_db= table->table_cache_key; + current_db= table->s->db; db_length= strlen(current_db); for (first=open_table=file->open_tables ; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index fba505f5a12..cba91f9f24d 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -161,7 +161,9 @@ int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans) if (m_batch_execute) return 0; #endif - return 
trans->execute(NoCommit,AbortOnError,h->m_force_send); + return trans->execute(NdbTransaction::NoCommit, + NdbTransaction::AbortOnError, + h->m_force_send); } inline @@ -172,7 +174,9 @@ int execute_commit(ha_ndbcluster *h, NdbTransaction *trans) if (m_batch_execute) return 0; #endif - return trans->execute(Commit,AbortOnError,h->m_force_send); + return trans->execute(NdbTransaction::Commit, + NdbTransaction::AbortOnError, + h->m_force_send); } inline @@ -183,7 +187,9 @@ int execute_commit(THD *thd, NdbTransaction *trans) if (m_batch_execute) return 0; #endif - return trans->execute(Commit,AbortOnError,thd->variables.ndb_force_send); + return trans->execute(NdbTransaction::Commit, + NdbTransaction::AbortOnError, + thd->variables.ndb_force_send); } inline @@ -194,7 +200,9 @@ int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans) if (m_batch_execute) return 0; #endif - return trans->execute(NoCommit,AO_IgnoreError,h->m_force_send); + return trans->execute(NdbTransaction::NoCommit, + NdbTransaction::AO_IgnoreError, + h->m_force_send); } /* @@ -435,7 +443,7 @@ struct Ndb_table_local_info { void ha_ndbcluster::set_rec_per_key() { DBUG_ENTER("ha_ndbcluster::get_status_const"); - for (uint i=0 ; i < table->keys ; i++) + for (uint i=0 ; i < table->s->keys ; i++) { table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= 1; } @@ -544,7 +552,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) NDBDICT *dict= ndb->getDictionary(); DBUG_PRINT("info", ("invalidateTable %s", m_tabname)); dict->invalidateTable(m_tabname); - table->version=0L; /* Free when thread is ready */ + table->s->version= 0L; /* Free when thread is ready */ break; } default: @@ -554,7 +562,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) DBUG_PRINT("info", ("transformed ndbcluster error %d to mysql error %d", err.code, res)); if (res == HA_ERR_FOUND_DUPP_KEY) - m_dupkey= table->primary_key; + m_dupkey= table->s->primary_key; DBUG_RETURN(res); } @@ -608,6 +616,7 @@ static inline bool 
ndb_supported_type(enum_field_types type) case MYSQL_TYPE_YEAR: case MYSQL_TYPE_STRING: case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_VARCHAR: case MYSQL_TYPE_TINY_BLOB: case MYSQL_TYPE_BLOB: case MYSQL_TYPE_MEDIUM_BLOB: @@ -618,7 +627,6 @@ static inline bool ndb_supported_type(enum_field_types type) return TRUE; case MYSQL_TYPE_NULL: case MYSQL_TYPE_GEOMETRY: - case MYSQL_TYPE_VARCHAR: break; } return FALSE; @@ -788,7 +796,7 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob) for (int loop= 0; loop <= 1; loop++) { uint32 offset= 0; - for (uint i= 0; i < table->fields; i++) + for (uint i= 0; i < table->s->fields; i++) { Field *field= table->field[i]; NdbValue value= m_value[i]; @@ -897,12 +905,12 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field, */ bool ha_ndbcluster::uses_blob_value(bool all_fields) { - if (table->blob_fields == 0) + if (table->s->blob_fields == 0) return FALSE; if (all_fields) return TRUE; { - uint no_fields= table->fields; + uint no_fields= table->s->fields; int i; THD *thd= table->in_use; // They always put blobs at the end.. 
@@ -1006,13 +1014,13 @@ int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase) char unique_index_name[FN_LEN]; static const char* unique_suffix= "$unique"; KEY* key_info= tab->key_info; - const char **key_name= tab->keynames.type_names; + const char **key_name= tab->s->keynames.type_names; Ndb *ndb= get_ndb(); NdbDictionary::Dictionary *dict= ndb->getDictionary(); DBUG_ENTER("ha_ndbcluster::build_index_list"); // Save information about all known indexes - for (i= 0; i < tab->keys; i++, key_info++, key_name++) + for (i= 0; i < tab->s->keys; i++, key_info++, key_name++) { index_name= *key_name; NDB_INDEX_TYPE idx_type= get_index_type_from_table(i); @@ -1085,12 +1093,12 @@ int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase) NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx) const { bool is_hash_index= (table->key_info[inx].algorithm == HA_KEY_ALG_HASH); - if (inx == table->primary_key) + if (inx == table->s->primary_key) return is_hash_index ? PRIMARY_KEY_INDEX : PRIMARY_KEY_ORDERED_INDEX; - else - return ((table->key_info[inx].flags & HA_NOSAME) ? - (is_hash_index ? UNIQUE_INDEX : UNIQUE_ORDERED_INDEX) : - ORDERED_INDEX); + + return ((table->key_info[inx].flags & HA_NOSAME) ? + (is_hash_index ? 
UNIQUE_INDEX : UNIQUE_ORDERED_INDEX) : + ORDERED_INDEX); } int ha_ndbcluster::check_index_fields_not_null(uint inx) @@ -1205,10 +1213,28 @@ inline ulong ha_ndbcluster::index_flags(uint idx_no, uint part, DBUG_RETURN(index_type_flags[get_index_type_from_table(idx_no)]); } +static void shrink_varchar(Field* field, const byte* & ptr, char* buf) +{ + if (field->type() == MYSQL_TYPE_VARCHAR) { + Field_varstring* f= (Field_varstring*)field; + if (f->length_bytes < 256) { + uint pack_len= field->pack_length(); + DBUG_ASSERT(1 <= pack_len && pack_len <= 256); + if (ptr[1] == 0) { + buf[0]= ptr[0]; + } else { + DBUG_ASSERT(false); + buf[0]= 255; + } + memmove(buf + 1, ptr + 2, pack_len - 1); + ptr= buf; + } + } +} int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key) { - KEY* key_info= table->key_info + table->primary_key; + KEY* key_info= table->key_info + table->s->primary_key; KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* end= key_part+key_info->key_parts; DBUG_ENTER("set_primary_key"); @@ -1216,10 +1242,13 @@ int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key) for (; key_part != end; key_part++) { Field* field= key_part->field; + const byte* ptr= key; + char buf[256]; + shrink_varchar(field, ptr, buf); if (set_ndb_key(op, field, - key_part->fieldnr-1, key)) + key_part->fieldnr-1, ptr)) ERR_RETURN(op->getNdbError()); - key += key_part->length; + key += key_part->store_length; } DBUG_RETURN(0); } @@ -1227,7 +1256,7 @@ int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key) int ha_ndbcluster::set_primary_key_from_old_data(NdbOperation *op, const byte *old_data) { - KEY* key_info= table->key_info + table->primary_key; + KEY* key_info= table->key_info + table->s->primary_key; KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* end= key_part+key_info->key_parts; DBUG_ENTER("set_primary_key_from_old_data"); @@ -1246,7 +1275,7 @@ int ha_ndbcluster::set_primary_key_from_old_data(NdbOperation *op, const byte *o 
int ha_ndbcluster::set_primary_key(NdbOperation *op) { DBUG_ENTER("set_primary_key"); - KEY* key_info= table->key_info + table->primary_key; + KEY* key_info= table->key_info + table->s->primary_key; KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* end= key_part+key_info->key_parts; @@ -1272,8 +1301,11 @@ ha_ndbcluster::set_index_key(NdbOperation *op, for (i= 0; key_part != end; key_part++, i++) { - if (set_ndb_key(op, key_part->field, i, - key_part->null_bit ? key_ptr + 1 : key_ptr)) + Field* field= key_part->field; + const byte* ptr= key_part->null_bit ? key_ptr + 1 : key_ptr; + char buf[256]; + shrink_varchar(field, ptr, buf); + if (set_ndb_key(op, field, i, ptr)) ERR_RETURN(m_active_trans->getNdbError()); key_ptr+= key_part->store_length; } @@ -1286,14 +1318,14 @@ ha_ndbcluster::set_index_key(NdbOperation *op, int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf) { + uint no_fields= table->s->fields, i; + NdbConnection *trans= m_active_trans; + NdbOperation *op; + THD *thd= current_thd; int res; DBUG_ENTER("pk_read"); DBUG_PRINT("enter", ("key_len: %u", key_len)); DBUG_DUMP("key", (char*)key, key_len); - uint no_fields= table->fields, i; - NdbTransaction *trans= m_active_trans; - NdbOperation *op; - THD *thd= current_thd; NdbOperation::LockMode lm= (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); @@ -1301,7 +1333,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf) op->readTuple(lm) != 0) ERR_RETURN(trans->getNdbError()); - if (table->primary_key == MAX_KEY) + if (table->s->primary_key == MAX_KEY) { // This table has no primary key, use "hidden" primary key DBUG_PRINT("info", ("Using hidden key")); @@ -1340,7 +1372,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf) int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data) { - uint no_fields= table->fields, i; + uint no_fields= table->s->fields, i; NdbTransaction *trans= m_active_trans; NdbOperation *op; THD *thd= 
current_thd; @@ -1734,7 +1766,10 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, char truncated_field_name[NDB_MAX_ATTR_NAME_SIZE]; strnmov(truncated_field_name,field->field_name,sizeof(truncated_field_name)); truncated_field_name[sizeof(truncated_field_name)-1]= '\0'; - if (op->setBound(truncated_field_name, p.bound_type, p.bound_ptr)) + const char* ptr= p.bound_ptr; + char buf[256]; + shrink_varchar(field, ptr, buf); + if (op->setBound(truncated_field_name, p.bound_type, ptr)) ERR_RETURN(op->getNdbError()); } } @@ -1756,7 +1791,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op) DBUG_ENTER("define_read_attrs"); // Define attributes to read - for (i= 0; i < table->fields; i++) + for (i= 0; i < table->s->fields; i++) { Field *field= table->field[i]; if ((thd->query_id == field->query_id) || @@ -1772,11 +1807,11 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op) } } - if (table->primary_key == MAX_KEY) + if (table->s->primary_key == MAX_KEY) { DBUG_PRINT("info", ("Getting hidden key")); // Scanning table with no primary key - int hidden_no= table->fields; + int hidden_no= table->s->fields; #ifndef DBUG_OFF const NDBTAB *tab= (const NDBTAB *) m_table; if (!tab->getColumn(hidden_no)) @@ -1987,13 +2022,13 @@ int ha_ndbcluster::write_row(byte *record) DBUG_ENTER("write_row"); - if(m_ignore_dup_key && table->primary_key != MAX_KEY) + if (m_ignore_dup_key && table->s->primary_key != MAX_KEY) { int peek_res= peek_row(); if (!peek_res) { - m_dupkey= table->primary_key; + m_dupkey= table->s->primary_key; DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); } if (peek_res != HA_ERR_KEY_NOT_FOUND) @@ -2012,12 +2047,12 @@ int ha_ndbcluster::write_row(byte *record) if (res != 0) ERR_RETURN(trans->getNdbError()); - if (table->primary_key == MAX_KEY) + if (table->s->primary_key == MAX_KEY) { // Table has hidden primary key Ndb *ndb= get_ndb(); Uint64 auto_value= ndb->getAutoIncrementValue((const NDBTAB *) m_table); - if (set_hidden_key(op, 
table->fields, (const byte*)&auto_value)) + if (set_hidden_key(op, table->s->fields, (const byte*)&auto_value)) ERR_RETURN(op->getNdbError()); } else @@ -2037,7 +2072,7 @@ int ha_ndbcluster::write_row(byte *record) // Set non-key attribute(s) bool set_blob_value= FALSE; - for (i= 0; i < table->fields; i++) + for (i= 0; i < table->s->fields; i++) { Field *field= table->field[i]; if (!(field->flags & PRI_KEY_FLAG) && @@ -2160,8 +2195,8 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) table->timestamp_field->set_time(); /* Check for update of primary key for special handling */ - if ((table->primary_key != MAX_KEY) && - (key_cmp(table->primary_key, old_data, new_data))) + if ((table->s->primary_key != MAX_KEY) && + (key_cmp(table->s->primary_key, old_data, new_data))) { int read_res, insert_res, delete_res; @@ -2217,14 +2252,14 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) op->updateTuple() != 0) ERR_RETURN(trans->getNdbError()); - if (table->primary_key == MAX_KEY) + if (table->s->primary_key == MAX_KEY) { // This table has no primary key, use "hidden" primary key DBUG_PRINT("info", ("Using hidden key")); // Require that the PK for this record has previously been // read into m_value - uint no_fields= table->fields; + uint no_fields= table->s->fields; const NdbRecAttr* rec= m_value[no_fields].rec; DBUG_ASSERT(rec); DBUG_DUMP("key", (char*)rec->aRef(), NDB_HIDDEN_PRIMARY_KEY_LENGTH); @@ -2241,7 +2276,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) } // Set non-key attribute(s) - for (i= 0; i < table->fields; i++) + for (i= 0; i < table->s->fields; i++) { Field *field= table->field[i]; if (((thd->query_id == field->query_id) || m_retrieve_all_fields) && @@ -2302,11 +2337,11 @@ int ha_ndbcluster::delete_row(const byte *record) no_uncommitted_rows_update(-1); - if (table->primary_key == MAX_KEY) + if (table->s->primary_key == MAX_KEY) { // This table has no primary key, use "hidden" primary key 
DBUG_PRINT("info", ("Using hidden key")); - uint no_fields= table->fields; + uint no_fields= table->s->fields; const NdbRecAttr* rec= m_value[no_fields].rec; DBUG_ASSERT(rec != NULL); @@ -2352,11 +2387,11 @@ void ha_ndbcluster::unpack_record(byte* buf) NdbValue *value= m_value; DBUG_ENTER("unpack_record"); - end= table->field + table->fields; + end= table->field + table->s->fields; // Set null flag(s) - bzero(buf, table->null_bytes); - for (field= table->field, end= field+table->fields; + bzero(buf, table->s->null_bytes); + for (field= table->field; field < end; field++, value++) { @@ -2399,10 +2434,10 @@ void ha_ndbcluster::unpack_record(byte* buf) #ifndef DBUG_OFF // Read and print all values that was fetched - if (table->primary_key == MAX_KEY) + if (table->s->primary_key == MAX_KEY) { // Table with hidden primary key - int hidden_no= table->fields; + int hidden_no= table->s->fields; const NDBTAB *tab= (const NDBTAB *) m_table; const NDBCOL *hidden_col= tab->getColumn(hidden_no); const NdbRecAttr* rec= m_value[hidden_no].rec; @@ -2429,7 +2464,7 @@ void ha_ndbcluster::print_results() if (!_db_on_) DBUG_VOID_RETURN; - for (uint f=0; ffields;f++) + for (uint f=0; fs->fields;f++) { // Use DBUG_PRINT since DBUG_FILE cannot be filtered out char buf[2000]; @@ -2539,21 +2574,26 @@ void ha_ndbcluster::print_results() my_snprintf(buf, sizeof(buf), "Decimal '%-*s'", field->pack_length(), value); break; } - case NdbDictionary::Column::Char:{ + case NdbDictionary::Column::Char: { const char *value= (char*)ptr; my_snprintf(buf, sizeof(buf), "Char '%.*s'", field->pack_length(), value); break; } - case NdbDictionary::Column::Varchar: - case NdbDictionary::Column::Binary: - case NdbDictionary::Column::Varbinary: { - const char *value= (char*)ptr; - my_snprintf(buf, sizeof(buf), "Var '%.*s'", field->pack_length(), value); + case NdbDictionary::Column::Varchar: { + uint len= *(uchar*)ptr; + const char *value= (char*)ptr + 1; + my_snprintf(buf, sizeof(buf), "Varchar (%u)'%.*s'", 
len, len, value); break; } - case NdbDictionary::Column::Bit: { + case NdbDictionary::Column::Binary: { const char *value= (char*)ptr; - my_snprintf(buf, sizeof(buf), "Bit '%.*s'", field->pack_length(), value); + my_snprintf(buf, sizeof(buf), "Binary '%.*s'", field->pack_length(), value); + break; + } + case NdbDictionary::Column::Varbinary: { + uint len= *(uchar*)ptr; + const char *value= (char*)ptr + 1; + my_snprintf(buf, sizeof(buf), "Varbinary (%u)'%.*s'", len, len, value); break; } case NdbDictionary::Column::Datetime: { @@ -2578,11 +2618,28 @@ void ha_ndbcluster::print_results() my_snprintf(buf, sizeof(buf), "Text [len=%u]", (unsigned)len); break; } + case NdbDictionary::Column::Bit: { + const char *value= (char*)ptr; + my_snprintf(buf, sizeof(buf), "Bit '%.*s'", field->pack_length(), value); + break; + } + case NdbDictionary::Column::Longvarchar: { + uint len= uint2korr(ptr); + const char *value= (char*)ptr + 2; + my_snprintf(buf, sizeof(buf), "Longvarchar (%u)'%.*s'", len, len, value); + break; + } + case NdbDictionary::Column::Longvarbinary: { + uint len= uint2korr(ptr); + const char *value= (char*)ptr + 2; + my_snprintf(buf, sizeof(buf), "Longvarbinary (%u)'%.*s'", len, len, value); + break; + } case NdbDictionary::Column::Undefined: my_snprintf(buf, sizeof(buf), "Unknown type: %d", col->getType()); break; } - + print_value: DBUG_PRINT("value", ("%u,%s: %s", f, col->getName(), buf)); } @@ -2833,7 +2890,7 @@ int ha_ndbcluster::rnd_init(bool scan) int res= cursor->restart(m_force_send); DBUG_ASSERT(res == 0); } - index_init(table->primary_key); + index_init(table->s->primary_key); DBUG_RETURN(0); } @@ -2918,9 +2975,9 @@ void ha_ndbcluster::position(const byte *record) byte *buff; DBUG_ENTER("position"); - if (table->primary_key != MAX_KEY) + if (table->s->primary_key != MAX_KEY) { - key_info= table->key_info + table->primary_key; + key_info= table->key_info + table->s->primary_key; key_part= key_info->key_part; end= key_part + key_info->key_parts; buff= 
ref; @@ -2944,7 +3001,7 @@ void ha_ndbcluster::position(const byte *record) { // No primary key, get hidden key DBUG_PRINT("info", ("Getting hidden key")); - int hidden_no= table->fields; + int hidden_no= table->s->fields; const NdbRecAttr* rec= m_value[hidden_no].rec; const NDBTAB *tab= (const NDBTAB *) m_table; const NDBCOL *hidden_col= tab->getColumn(hidden_no); @@ -2986,7 +3043,7 @@ void ha_ndbcluster::info(uint flag) if ((my_errno= check_ndb_connection())) DBUG_VOID_RETURN; Ndb *ndb= get_ndb(); - Uint64 rows; + Uint64 rows= 100; if (current_thd->variables.ndb_use_exact_count) ndb_get_table_statistics(ndb, m_tabname, &rows, 0); records= rows; @@ -3543,7 +3600,7 @@ int ndbcluster_rollback(THD *thd, void *ndb_transaction) "stmt" : "all")); DBUG_ASSERT(ndb && trans); - if (trans->execute(Rollback) != 0) + if (trans->execute(NdbTransaction::Rollback) != 0) { const NdbError err= trans->getNdbError(); const NdbOperation *error_op= trans->getNdbErrorOperation(); @@ -3662,14 +3719,34 @@ static int create_ndb_column(NDBCOL &col, col.setLength(field->pack_length()); } break; - case MYSQL_TYPE_VAR_STRING: - if (field->flags & BINARY_FLAG) - col.setType(NDBCOL::Varbinary); - else { - col.setType(NDBCOL::Varchar); - col.setCharset(cs); + case MYSQL_TYPE_VAR_STRING: // ? 
+ case MYSQL_TYPE_VARCHAR: + { + Field_varstring* f= (Field_varstring*)field; + if (f->length_bytes == 1) + { + if (field->flags & BINARY_FLAG) + col.setType(NDBCOL::Varbinary); + else { + col.setType(NDBCOL::Varchar); + col.setCharset(cs); + } + } + else if (f->length_bytes == 2) + { + if (field->flags & BINARY_FLAG) + col.setType(NDBCOL::Longvarbinary); + else { + col.setType(NDBCOL::Longvarchar); + col.setCharset(cs); + } + } + else + { + return HA_ERR_UNSUPPORTED; + } + col.setLength(field->field_length); } - col.setLength(field->pack_length()); break; // Blob types (all come in as MYSQL_TYPE_BLOB) mysql_type_tiny_blob: @@ -3785,11 +3862,11 @@ int ha_ndbcluster::create(const char *name, NDBCOL col; uint pack_length, length, i, pk_length= 0; const void *data, *pack_data; - const char **key_names= form->keynames.type_names; + const char **key_names= form->s->keynames.type_names; char name2[FN_HEADLEN]; bool create_from_engine= (info->table_options & HA_CREATE_FROM_ENGINE); - DBUG_ENTER("create"); + DBUG_ENTER("ha_ndbcluster::create"); DBUG_PRINT("enter", ("name: %s", name)); fn_format(name2, name, "", "",2); // Remove the .frm extension set_dbname(name2); @@ -3821,7 +3898,7 @@ int ha_ndbcluster::create(const char *name, my_free((char*)data, MYF(0)); my_free((char*)pack_data, MYF(0)); - for (i= 0; i < form->fields; i++) + for (i= 0; i < form->s->fields; i++) { Field *field= form->field[i]; DBUG_PRINT("info", ("name: %s, type: %u, pack_length: %d", @@ -3835,7 +3912,7 @@ int ha_ndbcluster::create(const char *name, } // No primary key, create shadow key as 64 bit, auto increment - if (form->primary_key == MAX_KEY) + if (form->s->primary_key == MAX_KEY) { DBUG_PRINT("info", ("Generating shadow key")); col.setName("$PK"); @@ -3849,7 +3926,7 @@ int ha_ndbcluster::create(const char *name, } // Make sure that blob tables don't have to big part size - for (i= 0; i < form->fields; i++) + for (i= 0; i < form->s->fields; i++) { /** * The extra +7 concists @@ -4124,7 +4201,6 
@@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_table_flags(HA_REC_NOT_IN_SEQ | HA_NULL_IN_KEY | HA_AUTO_PART_KEY | - HA_NO_VARCHAR | HA_NO_PREFIX_CHAR_KEYS | HA_NEED_READ_RANGE_BUFFER | HA_CAN_BIT_FIELD), @@ -4219,9 +4295,9 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked) // Setup ref_length to make room for the whole // primary key to be written in the ref variable - if (table->primary_key != MAX_KEY) + if (table->s->primary_key != MAX_KEY) { - key= table->key_info+table->primary_key; + key= table->key_info+table->s->primary_key; ref_length= key->key_length; DBUG_PRINT("info", (" ref_length: %d", ref_length)); } @@ -4560,7 +4636,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, TABLE_LIST table_list; bzero((char*) &table_list,sizeof(table_list)); table_list.db= (char*) db; - table_list.alias=table_list.real_name=(char*)file_name; + table_list.alias= table_list.table_name= (char*)file_name; (void)mysql_rm_table_part2(thd, &table_list, /* if_exists */ TRUE, /* drop_temporary */ FALSE, @@ -4695,7 +4771,7 @@ void ndbcluster_print_error(int error, const NdbOperation *error_op) DBUG_ENTER("ndbcluster_print_error"); TABLE tab; const char *tab_name= (error_op) ? 
error_op->getTableName() : ""; - tab.table_name= (char *) tab_name; + tab.alias= (char *) tab_name; ha_ndbcluster error_handler(&tab); tab.file= &error_handler; error_handler.print_error(error, MYF(0)); @@ -5066,7 +5142,9 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows); pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits); - check= pTrans->execute(NoCommit, AbortOnError, TRUE); + check= pTrans->execute(NdbTransaction::NoCommit, + NdbTransaction::AbortOnError, + TRUE); if (check == -1) break; @@ -5135,7 +5213,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, int res; KEY* key_info= table->key_info + active_index; NDB_INDEX_TYPE index_type= get_index_type(active_index); - ulong reclength= table->reclength; + ulong reclength= table->s->reclength; NdbOperation* op; if (uses_blob_value(m_retrieve_all_fields)) @@ -5317,7 +5395,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p) int res; int range_no; - ulong reclength= table->reclength; + ulong reclength= table->s->reclength; const NdbOperation* op= m_current_multi_operation; for(;multi_range_curr < m_multi_range_defined; multi_range_curr++) { @@ -5454,7 +5532,7 @@ ha_ndbcluster::setup_recattr(const NdbRecAttr* curr) Field **field, **end; NdbValue *value= m_value; - end= table->field + table->fields; + end= table->field + table->s->fields; for (field= table->field; field < end; field++, value++) { diff --git a/sql/handler.cc b/sql/handler.cc index cde2c7b02a8..bbe01dd93d5 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -1017,7 +1017,7 @@ int handler::ha_open(const char *name, int mode, int test_if_locked) int error; DBUG_ENTER("handler::ha_open"); DBUG_PRINT("enter",("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d", - name, table->db_type, table->db_stat, mode, + name, table->s->db_type, table->db_stat, mode, test_if_locked)); if 
((error=open(name,mode,test_if_locked))) @@ -1036,7 +1036,7 @@ int handler::ha_open(const char *name, int mode, int test_if_locked) } else { - if (table->db_options_in_use & HA_OPTION_READ_ONLY_DATA) + if (table->s->db_options_in_use & HA_OPTION_READ_ONLY_DATA) table->db_stat|=HA_READ_ONLY; (void) extra(HA_EXTRA_NO_READCHECK); // Not needed in SQL @@ -1214,7 +1214,7 @@ void handler::update_auto_increment() first key part, as there is no guarantee that the first parts will be in sequence */ - if (!table->next_number_key_offset) + if (!table->s->next_number_key_offset) { /* Set next insert id to point to next auto-increment value to be able to @@ -1252,8 +1252,8 @@ ulonglong handler::get_auto_increment() int error; (void) extra(HA_EXTRA_KEYREAD); - index_init(table->next_number_index); - if (!table->next_number_key_offset) + index_init(table->s->next_number_index); + if (!table->s->next_number_key_offset) { // Autoincrement at key-start error=index_last(table->record[1]); } @@ -1261,17 +1261,17 @@ ulonglong handler::get_auto_increment() { byte key[MAX_KEY_LENGTH]; key_copy(key, table->record[0], - table->key_info + table->next_number_index, - table->next_number_key_offset); - error=index_read(table->record[1], key, table->next_number_key_offset, - HA_READ_PREFIX_LAST); + table->key_info + table->s->next_number_index, + table->s->next_number_key_offset); + error= index_read(table->record[1], key, table->s->next_number_key_offset, + HA_READ_PREFIX_LAST); } if (error) nr=1; else - nr=((ulonglong) table->next_number_field-> - val_int_offset(table->rec_buff_length)+1); + nr= ((ulonglong) table->next_number_field-> + val_int_offset(table->s->rec_buff_length)+1); index_end(); (void) extra(HA_EXTRA_NO_KEYREAD); return nr; @@ -1396,10 +1396,10 @@ void handler::print_error(int error, myf errflag) */ char *db; char buff[FN_REFLEN]; - uint length=dirname_part(buff,table->path); + uint length= dirname_part(buff,table->s->path); buff[length-1]=0; db=buff+dirname_length(buff); - 
my_error(ER_NO_SUCH_TABLE, MYF(0), db, table->table_name); + my_error(ER_NO_SUCH_TABLE, MYF(0), db, table->alias); break; } default: @@ -1422,7 +1422,7 @@ void handler::print_error(int error, myf errflag) DBUG_VOID_RETURN; } } - my_error(textno, errflag, table->table_name, error); + my_error(textno, errflag, table->alias, error); DBUG_VOID_RETURN; } diff --git a/sql/item.cc b/sql/item.cc index 7dba1f3a66a..c84496f8eb7 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -569,7 +569,7 @@ bool DTCollation::aggregate(DTCollation &dt, uint flags) } Item_field::Item_field(Field *f) - :Item_ident(NullS, f->table_name, f->field_name), + :Item_ident(NullS, *f->table_name, f->field_name), item_equal(0), no_const_subst(0), have_privileges(0), any_privileges(0) { @@ -582,7 +582,7 @@ Item_field::Item_field(Field *f) } Item_field::Item_field(THD *thd, Field *f) - :Item_ident(f->table->table_cache_key, f->table_name, f->field_name), + :Item_ident(f->table->s->db, *f->table_name, f->field_name), item_equal(0), no_const_subst(0), have_privileges(0), any_privileges(0) { @@ -636,9 +636,9 @@ void Item_field::set_field(Field *field_par) maybe_null=field->maybe_null(); max_length=field_par->field_length; decimals= field->decimals(); - table_name=field_par->table_name; - field_name=field_par->field_name; - db_name=field_par->table->table_cache_key; + table_name= *field_par->table_name; + field_name= field_par->field_name; + db_name= field_par->table->s->db; alias_name_used= field_par->table->alias_name_used; unsigned_flag=test(field_par->flags & UNSIGNED_FLAG); collation.set(field_par->charset(), DERIVATION_IMPLICIT); @@ -1576,16 +1576,18 @@ bool Item_ref_null_helper::get_date(TIME *ltime, uint fuzzydate) static void mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current, Item_ident *item) { - // store pointer on SELECT_LEX from which item is dependent + const char *db_name= item->db_name ? item->db_name : ""; + const char *table_name= item->table_name ? 
item->table_name : ""; + /* store pointer on SELECT_LEX from which item is dependent */ item->depended_from= last; current->mark_as_dependent(last); if (thd->lex->describe & DESCRIBE_EXTENDED) { char warn_buff[MYSQL_ERRMSG_SIZE]; sprintf(warn_buff, ER(ER_WARN_FIELD_RESOLVED), - (item->db_name?item->db_name:""), (item->db_name?".":""), - (item->table_name?item->table_name:""), (item->table_name?".":""), - item->field_name, + db_name, (db_name[0] ? "." : ""), + table_name, (table_name [0] ? "." : ""), + item->field_name, current->select_number, last->select_number); push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_WARN_FIELD_RESOLVED, warn_buff); @@ -1745,8 +1747,9 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) Search for a column or derived column named as 'ref' in the SELECT clause of the current select. */ - if (!(select_ref= find_item_in_list(ref, *(select->get_item_list()), &counter, - REPORT_EXCEPT_NOT_FOUND, ¬_used))) + if (!(select_ref= find_item_in_list(ref, *(select->get_item_list()), + &counter, REPORT_EXCEPT_NOT_FOUND, + ¬_used))) return NULL; /* Some error occurred. */ /* If this is a non-aggregated field inside HAVING, search in GROUP BY. 
*/ @@ -2053,7 +2056,7 @@ bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **reference) else { db= cached_table->db; - tab= cached_table->real_name; + tab= cached_table->table_name; } if (!(have_privileges= (get_column_grant(thd, &field->table->grant, db, tab, field_name) & @@ -3195,7 +3198,7 @@ bool Item_default_value::fix_fields(THD *thd, if (!(def_field= (Field*) sql_alloc(field_arg->field->size_of()))) return TRUE; memcpy(def_field, field_arg->field, field_arg->field->size_of()); - def_field->move_field(def_field->table->default_values - + def_field->move_field(def_field->table->s->default_values - def_field->table->record[0]); set_field(def_field); return FALSE; diff --git a/sql/item_func.cc b/sql/item_func.cc index 28c4ea86ab3..607efe06e77 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -194,8 +194,8 @@ bool Item_func::agg_arg_charsets(DTCollation &coll, } if ((*arg)->type() == FIELD_ITEM) ((Item_field *)(*arg))->no_const_subst= 1; - conv->fix_fields(thd, 0, &conv); *arg= conv; + conv->fix_fields(thd, 0, arg); } if (arena) thd->restore_backup_item_arena(arena, &backup); @@ -3224,7 +3224,7 @@ bool Item_func_match::fix_index() if (key == NO_SUCH_KEY) return 0; - for (keynr=0 ; keynr < table->keys ; keynr++) + for (keynr=0 ; keynr < table->s->keys ; keynr++) { if ((table->key_info[keynr].flags & HA_FULLTEXT) && (table->keys_in_use_for_query.is_set(keynr))) diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index dbf30d7d793..2d4f88ed57b 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1497,7 +1497,7 @@ void subselect_uniquesubquery_engine::print(String *str) str->append("(", 23); tab->ref.items[0]->print(str); str->append(" in ", 4); - str->append(tab->table->real_name); + str->append(tab->table->s->table_name); KEY *key_info= tab->table->key_info+ tab->ref.key; str->append(" on ", 4); str->append(key_info->name); @@ -1515,7 +1515,7 @@ void subselect_indexsubquery_engine::print(String *str) str->append("(", 15); 
tab->ref.items[0]->print(str); str->append(" in ", 4); - str->append(tab->table->real_name); + str->append(tab->table->s->table_name); KEY *key_info= tab->table->key_info+ tab->ref.key; str->append(" on ", 4); str->append(key_info->name); diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 1c005cf60a9..168c68ad706 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -1225,7 +1225,7 @@ int composite_key_cmp(void* arg, byte* key1, byte* key2) { Item_sum_count_distinct* item = (Item_sum_count_distinct*)arg; Field **field = item->table->field; - Field **field_end= field + item->table->fields; + Field **field_end= field + item->table->s->fields; uint32 *lengths=item->field_lengths; for (; field < field_end; ++field) { @@ -1344,15 +1344,15 @@ bool Item_sum_count_distinct::setup(THD *thd) // no blobs, otherwise it would be MyISAM - if (table->db_type == DB_TYPE_HEAP) + if (table->s->db_type == DB_TYPE_HEAP) { qsort_cmp2 compare_key; void* cmp_arg; // to make things easier for dump_leaf if we ever have to dump to MyISAM - restore_record(table,default_values); + restore_record(table,s->default_values); - if (table->fields == 1) + if (table->s->fields == 1) { /* If we have only one field, which is the most common use of @@ -1396,10 +1396,10 @@ bool Item_sum_count_distinct::setup(THD *thd) { bool all_binary = 1; Field** field, **field_end; - field_end = (field = table->field) + table->fields; + field_end = (field = table->field) + table->s->fields; uint32 *lengths; if (!(field_lengths= - (uint32*) thd->alloc(sizeof(uint32) * table->fields))) + (uint32*) thd->alloc(sizeof(uint32) * table->s->fields))) return 1; for (key_length = 0, lengths=field_lengths; field < field_end; ++field) @@ -1410,7 +1410,7 @@ bool Item_sum_count_distinct::setup(THD *thd) if (!(*field)->binary()) all_binary = 0; // Can't break loop here } - rec_offset = table->reclength - key_length; + rec_offset= table->s->reclength - key_length; if (all_binary) { compare_key = (qsort_cmp2)simple_raw_key_cmp; @@ 
-1781,7 +1781,7 @@ int dump_leaf_key(byte* key, uint32 count __attribute__((unused)), String *res; char *save_ptr= field->ptr; uint offset= (uint) (save_ptr - record); - DBUG_ASSERT(offset < item->table->reclength); + DBUG_ASSERT(offset < item->table->s->reclength); field->ptr= (char *) key + offset; res= field->val_str(&tmp,&tmp2); item->result.append(*res); @@ -2124,10 +2124,10 @@ bool Item_func_group_concat::setup(THD *thd) table->file->extra(HA_EXTRA_NO_ROWS); table->no_rows= 1; - key_length= table->reclength; + key_length= table->s->reclength; /* Offset to first result field in table */ - field_list_offset= table->fields - (list.elements - const_fields); + field_list_offset= table->s->fields - (list.elements - const_fields); if (tree_mode) delete_tree(tree); diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 27c000138d8..2d0e5d7632f 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -1603,6 +1603,7 @@ void Item_func_from_unixtime::fix_length_and_dec() collation.set(&my_charset_bin); decimals=0; max_length=MAX_DATETIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + maybe_null= 1; thd->time_zone_used= 1; } @@ -1642,11 +1643,12 @@ longlong Item_func_from_unixtime::val_int() bool Item_func_from_unixtime::get_date(TIME *ltime, uint fuzzy_date __attribute__((unused))) { - longlong tmp= args[0]->val_int(); - - if ((null_value= (args[0]->null_value || - tmp < TIMESTAMP_MIN_VALUE || - tmp > TIMESTAMP_MAX_VALUE))) + ulonglong tmp= (ulonglong)(args[0]->val_int()); + /* + "tmp > TIMESTAMP_MAX_VALUE" check also covers case of negative + from_unixtime() argument since tmp is unsigned. 
+ */ + if ((null_value= (args[0]->null_value || tmp > TIMESTAMP_MAX_VALUE))) return 1; thd->variables.time_zone->gmt_sec_to_TIME(ltime, (my_time_t)tmp); @@ -2202,6 +2204,12 @@ String *Item_datetime_typecast::val_str(String *str) bool Item_time_typecast::get_time(TIME *ltime) { bool res= get_arg0_time(ltime); + /* + For MYSQL_TIMESTAMP_TIME value we can have non-zero day part, + which we should not lose. + */ + if (ltime->time_type == MYSQL_TIMESTAMP_DATETIME) + ltime->year= ltime->month= ltime->day= 0; ltime->time_type= MYSQL_TIMESTAMP_TIME; return res; } @@ -2225,6 +2233,7 @@ String *Item_time_typecast::val_str(String *str) bool Item_date_typecast::get_date(TIME *ltime, uint fuzzy_date) { bool res= get_arg0_date(ltime, TIME_FUZZY_DATE); + ltime->hour= ltime->minute= ltime->second= ltime->second_part= 0; ltime->time_type= MYSQL_TIMESTAMP_DATE; return res; } diff --git a/sql/key.cc b/sql/key.cc index d54b8721cab..aec294e370a 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -38,7 +38,9 @@ int find_ref_key(TABLE *table,Field *field, uint *key_length) /* Test if some key starts as fieldpos */ - for (i=0, key_info=table->key_info ; i < (int) table->keys ; i++, key_info++) + for (i= 0, key_info= table->key_info ; + i < (int) table->s->keys ; + i++, key_info++) { if (key_info->key_part[0].offset == fieldpos) { /* Found key. 
Calc keylength */ @@ -48,7 +50,9 @@ int find_ref_key(TABLE *table,Field *field, uint *key_length) } /* Test if some key contains fieldpos */ - for (i=0, key_info=table->key_info ; i < (int) table->keys ; i++, key_info++) + for (i= 0, key_info= table->key_info ; + i < (int) table->s->keys ; + i++, key_info++) { uint j; KEY_PART_INFO *key_part; @@ -373,9 +377,9 @@ bool check_if_key_used(TABLE *table, uint idx, List &fields) If table handler has primary key as part of the index, check that primary key is not updated */ - if (idx != table->primary_key && table->primary_key < MAX_KEY && + if (idx != table->s->primary_key && table->s->primary_key < MAX_KEY && (table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX)) - return check_if_key_used(table, table->primary_key, fields); + return check_if_key_used(table, table->s->primary_key, fields); return 0; } diff --git a/sql/lock.cc b/sql/lock.cc index 973e82b7b10..fffd48d5305 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -401,7 +401,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, *write_lock_used=0; for (i=tables=lock_count=0 ; i < count ; i++) { - if (table_ptr[i]->tmp_table != TMP_TABLE) + if (table_ptr[i]->s->tmp_table != TMP_TABLE) { tables+=table_ptr[i]->file->lock_count(); lock_count++; @@ -421,7 +421,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, for (i=0 ; i < count ; i++) { TABLE *table; - if ((table=table_ptr[i])->tmp_table == TMP_TABLE) + if ((table=table_ptr[i])->s->tmp_table == TMP_TABLE) continue; *to++=table; enum thr_lock_type lock_type= table->reginfo.lock_type; @@ -430,7 +430,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, *write_lock_used=table; if (table->db_stat & HA_READ_ONLY) { - my_error(ER_OPEN_AS_READONLY, MYF(0), table->table_name); + my_error(ER_OPEN_AS_READONLY, MYF(0), table->alias); my_free((gptr) sql_lock,MYF(0)); return 0; } @@ -526,11 +526,11 @@ int lock_table_name(THD *thd, TABLE_LIST 
*table_list) char *db= table_list->db; uint key_length; DBUG_ENTER("lock_table_name"); - DBUG_PRINT("enter",("db: %s name: %s", db, table_list->real_name)); + DBUG_PRINT("enter",("db: %s name: %s", db, table_list->table_name)); safe_mutex_assert_owner(&LOCK_open); - key_length=(uint) (strmov(strmov(key,db)+1,table_list->real_name) + key_length=(uint) (strmov(strmov(key,db)+1,table_list->table_name) -key)+ 1; @@ -549,8 +549,9 @@ int lock_table_name(THD *thd, TABLE_LIST *table_list) if (!(table= (TABLE*) my_malloc(sizeof(*table)+key_length, MYF(MY_WME | MY_ZEROFILL)))) DBUG_RETURN(-1); - memcpy((table->table_cache_key= (char*) (table+1)), key, key_length); - table->key_length=key_length; + table->s= &table->share_not_to_be_used; + memcpy((table->s->table_cache_key= (char*) (table+1)), key, key_length); + table->s->key_length=key_length; table->in_use=thd; table->locked_by_name=1; table_list->table=table; @@ -560,7 +561,7 @@ int lock_table_name(THD *thd, TABLE_LIST *table_list) my_free((gptr) table,MYF(0)); DBUG_RETURN(-1); } - if (remove_table_from_cache(thd, db, table_list->real_name)) + if (remove_table_from_cache(thd, db, table_list->table_name)) DBUG_RETURN(1); // Table is in use DBUG_RETURN(0); } diff --git a/sql/log_event.cc b/sql/log_event.cc index 581d3ef0d21..2fa4e09913e 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -2128,7 +2128,7 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, const char *db_arg, const char *table_name_arg, List &fields_arg, enum enum_duplicates handle_dup, - bool using_trans) + bool ignore, bool using_trans) :Log_event(thd_arg, !thd_arg->tmp_table_used ? 
0 : LOG_EVENT_THREAD_SPECIFIC_F, using_trans), thread_id(thd_arg->thread_id), @@ -2166,9 +2166,6 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, sql_ex.empty_flags= 0; switch (handle_dup) { - case DUP_IGNORE: - sql_ex.opt_flags|= IGNORE_FLAG; - break; case DUP_REPLACE: sql_ex.opt_flags|= REPLACE_FLAG; break; @@ -2176,6 +2173,8 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, case DUP_ERROR: break; } + if (ignore) + sql_ex.opt_flags|= IGNORE_FLAG; if (!ex->field_term->length()) sql_ex.empty_flags |= FIELD_TERM_EMPTY; @@ -2496,7 +2495,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, TABLE_LIST tables; bzero((char*) &tables,sizeof(tables)); tables.db = thd->db; - tables.alias = tables.real_name = (char*)table_name; + tables.alias = tables.table_name = (char*) table_name; tables.lock_type = TL_WRITE; tables.updating= 1; @@ -2511,37 +2510,41 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, { char llbuff[22]; enum enum_duplicates handle_dup; + bool ignore= 0; /* Make a simplified LOAD DATA INFILE query, for the information of the user in SHOW PROCESSLIST. Note that db is known in the 'db' column. 
*/ if ((load_data_query= (char *) my_alloca(18 + strlen(fname) + 14 + - strlen(tables.real_name) + 8))) + strlen(tables.table_name) + 8))) { thd->query_length= (uint)(strxmov(load_data_query, "LOAD DATA INFILE '", fname, - "' INTO TABLE `", tables.real_name, + "' INTO TABLE `", tables.table_name, "` <...>", NullS) - load_data_query); thd->query= load_data_query; } if (sql_ex.opt_flags & REPLACE_FLAG) handle_dup= DUP_REPLACE; else if (sql_ex.opt_flags & IGNORE_FLAG) - handle_dup= DUP_IGNORE; + { + ignore= 1; + handle_dup= DUP_ERROR; + } else { /* When replication is running fine, if it was DUP_ERROR on the - master then we could choose DUP_IGNORE here, because if DUP_ERROR + master then we could choose IGNORE here, because if DUP_ERROR suceeded on master, and data is identical on the master and slave, - then there should be no uniqueness errors on slave, so DUP_IGNORE is + then there should be no uniqueness errors on slave, so IGNORE is the same as DUP_ERROR. But in the unlikely case of uniqueness errors (because the data on the master and slave happen to be different (user error or bug), we want LOAD DATA to print an error message on the slave to discover the problem. If reading from net (a 3.23 master), mysql_load() will change this - to DUP_IGNORE. + to IGNORE. 
*/ handle_dup= DUP_ERROR; } @@ -2575,8 +2578,8 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, */ thd->net.pkt_nr = net->pkt_nr; } - if (mysql_load(thd, &ex, &tables, field_list, handle_dup, net != 0, - TL_WRITE, 0)) + if (mysql_load(thd, &ex, &tables, field_list, handle_dup, ignore, + net != 0, TL_WRITE)) thd->query_error = 1; if (thd->cuted_fields) { @@ -3495,8 +3498,9 @@ Create_file_log_event:: Create_file_log_event(THD* thd_arg, sql_exchange* ex, const char* db_arg, const char* table_name_arg, List& fields_arg, enum enum_duplicates handle_dup, + bool ignore, char* block_arg, uint block_len_arg, bool using_trans) - :Load_log_event(thd_arg,ex,db_arg,table_name_arg,fields_arg,handle_dup, + :Load_log_event(thd_arg,ex,db_arg,table_name_arg,fields_arg,handle_dup, ignore, using_trans), fake_base(0), block(block_arg), event_buf(0), block_len(block_len_arg), file_id(thd_arg->file_id = mysql_bin_log.next_file_id()) diff --git a/sql/log_event.h b/sql/log_event.h index 390a8c8070d..64bb9d502e9 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -783,7 +783,7 @@ public: Load_log_event(THD* thd, sql_exchange* ex, const char* db_arg, const char* table_name_arg, - List& fields_arg, enum enum_duplicates handle_dup, + List& fields_arg, enum enum_duplicates handle_dup, bool ignore, bool using_trans); void set_fields(const char* db, List &fields_arg); const char* get_db() { return db; } @@ -1170,7 +1170,7 @@ public: Create_file_log_event(THD* thd, sql_exchange* ex, const char* db_arg, const char* table_name_arg, List& fields_arg, - enum enum_duplicates handle_dup, + enum enum_duplicates handle_dup, bool ignore, char* block_arg, uint block_len_arg, bool using_trans); #ifdef HAVE_REPLICATION diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index c5568bf52a4..6f569ea3ef4 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -613,6 +613,7 @@ bool mysql_alter_table(THD *thd, char *new_db, char *new_name, List &keys, uint order_num, ORDER *order, enum 
enum_duplicates handle_duplicates, + bool ignore, ALTER_INFO *alter_info, bool do_send_ok=1); bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool do_send_ok); bool mysql_create_like_table(THD *thd, TABLE_LIST *table, @@ -631,11 +632,11 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list, int mysql_update(THD *thd,TABLE_LIST *tables,List &fields, List &values,COND *conds, uint order_num, ORDER *order, ha_rows limit, - enum enum_duplicates handle_duplicates); + enum enum_duplicates handle_duplicates, bool ignore); bool mysql_multi_update(THD *thd, TABLE_LIST *table_list, List *fields, List *values, COND *conds, ulong options, - enum enum_duplicates handle_duplicates, + enum enum_duplicates handle_duplicates, bool ignore, SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex); bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, List &fields, List_item *values, @@ -644,7 +645,8 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, COND **where, bool select_insert); bool mysql_insert(THD *thd,TABLE_LIST *table,List &fields, List &values, List &update_fields, - List &update_values, enum_duplicates flag); + List &update_values, enum_duplicates flag, + bool ignore); int check_that_all_fields_are_given_values(THD *thd, TABLE *entry); bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds); bool mysql_delete(THD *thd, TABLE_LIST *table, COND *conds, SQL_LIST *order, @@ -882,8 +884,7 @@ bool eval_const_cond(COND *cond); /* sql_load.cc */ bool mysql_load(THD *thd, sql_exchange *ex, TABLE_LIST *table_list, List &fields, enum enum_duplicates handle_duplicates, - bool local_file, thr_lock_type lock_type, - bool ignore_check_option_errors); + bool ignore, bool local_file, thr_lock_type lock_type); int write_record(THD *thd, TABLE *table, COPY_INFO *info); /* sql_manager.cc */ @@ -1268,7 +1269,7 @@ inline void mark_as_null_row(TABLE *table) { table->null_row=1; table->status|=STATUS_NULL_ROW; - 
bfill(table->null_flags,table->null_bytes,255); + bfill(table->null_flags,table->s->null_bytes,255); } inline void table_case_convert(char * name, uint length) @@ -1318,7 +1319,7 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr) table->const_table= 0; table->null_row= 0; table->status= STATUS_NO_RECORD; - table->keys_in_use_for_query= table->keys_in_use; + table->keys_in_use_for_query= table->s->keys_in_use; table->maybe_null= test(table->outer_join= table_list->outer_join); table->tablenr= tablenr; table->map= (table_map) 1 << tablenr; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index d5d67212c41..0e46e39960e 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -712,7 +712,7 @@ QUICK_SELECT_I::QUICK_SELECT_I() QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr, bool no_alloc, MEM_ROOT *parent_alloc) - :dont_free(0),error(0),free_file(0),cur_range(NULL),range(0),in_range(0) + :dont_free(0),error(0),free_file(0),in_range(0),cur_range(NULL),range(0) { sorted= 0; index= key_nr; @@ -822,7 +822,7 @@ QUICK_INDEX_MERGE_SELECT::push_quick_back(QUICK_RANGE_SELECT *quick_sel_range) processed separately. 
*/ if (head->file->primary_key_is_clustered() && - quick_sel_range->index == head->primary_key) + quick_sel_range->index == head->s->primary_key) pk_quick_select= quick_sel_range; else return quick_selects.push_back(quick_sel_range); @@ -927,10 +927,10 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler) DBUG_RETURN(0); } - if (!(file= get_new_handler(head, head->db_type))) + if (!(file= get_new_handler(head, head->s->db_type))) goto failure; DBUG_PRINT("info", ("Allocated new handler %p", file)); - if (file->ha_open(head->path, head->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) + if (file->ha_open(head->s->path, head->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) { /* Caller will free the memory */ goto failure; @@ -1551,7 +1551,7 @@ public: static int fill_used_fields_bitmap(PARAM *param) { TABLE *table= param->table; - param->fields_bitmap_size= (table->fields/8 + 1); + param->fields_bitmap_size= (table->s->fields/8 + 1); uchar *tmp; uint pk; if (!(tmp= (uchar*)alloc_root(param->mem_root,param->fields_bitmap_size)) || @@ -1560,13 +1560,13 @@ static int fill_used_fields_bitmap(PARAM *param) return 1; bitmap_clear_all(¶m->needed_fields); - for (uint i= 0; i < table->fields; i++) + for (uint i= 0; i < table->s->fields; i++) { if (param->thd->query_id == table->field[i]->query_id) bitmap_set_bit(¶m->needed_fields, i+1); } - pk= param->table->primary_key; + pk= param->table->s->primary_key; if (param->table->file->primary_key_is_clustered() && pk != MAX_KEY) { /* The table uses clustered PK and it is not internally generated */ @@ -1674,10 +1674,10 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, thd->no_errors=1; // Don't warn about NULL init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); - if (!(param.key_parts = (KEY_PART*) alloc_root(&alloc, - sizeof(KEY_PART)* - head->key_parts)) - || fill_used_fields_bitmap(¶m)) + if (!(param.key_parts= (KEY_PART*) alloc_root(&alloc, + sizeof(KEY_PART)* + head->s->key_parts)) || + 
fill_used_fields_bitmap(¶m)) { thd->no_errors=0; free_root(&alloc,MYF(0)); // Return memory & allocator @@ -1692,7 +1692,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, This is used in get_mm_parts function. */ key_info= head->key_info; - for (idx=0 ; idx < head->keys ; idx++, key_info++) + for (idx=0 ; idx < head->s->keys ; idx++, key_info++) { KEY_PART_INFO *key_part_info; if (!keys_to_use.is_set(idx)) @@ -1876,7 +1876,7 @@ double get_sweep_read_cost(const PARAM *param, ha_rows records) double result; if (param->table->file->primary_key_is_clustered()) { - result= param->table->file->read_time(param->table->primary_key, + result= param->table->file->read_time(param->table->s->primary_key, records, records); } else @@ -2040,7 +2040,8 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, all_scans_ror_able &= ((*ptree)->n_ror_scans > 0); all_scans_rors &= (*cur_child)->is_ror; if (pk_is_clustered && - param->real_keynr[(*cur_child)->key_idx] == param->table->primary_key) + param->real_keynr[(*cur_child)->key_idx] == + param->table->s->primary_key) { cpk_scan= cur_child; cpk_scan_records= (*cur_child)->records; @@ -2763,13 +2764,15 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree, */ ROR_SCAN_INFO **cur_ror_scan; ROR_SCAN_INFO *cpk_scan= NULL; + uint cpk_no; bool cpk_scan_used= FALSE; + if (!(tree->ror_scans= (ROR_SCAN_INFO**)alloc_root(param->mem_root, sizeof(ROR_SCAN_INFO*)* param->keys))) return NULL; - uint cpk_no= (param->table->file->primary_key_is_clustered())? - param->table->primary_key : MAX_KEY; + cpk_no= ((param->table->file->primary_key_is_clustered()) ? 
+ param->table->s->primary_key : MAX_KEY); for (idx= 0, cur_ror_scan= tree->ror_scans; idx < param->keys; idx++) { @@ -3128,7 +3131,7 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree, read_index_only && (param->table->file->index_flags(keynr, param->max_key_part,1) & HA_KEYREAD_ONLY) && - !(pk_is_clustered && keynr == param->table->primary_key)) + !(pk_is_clustered && keynr == param->table->s->primary_key)) /* We can resolve this by only reading through this key. */ found_read_time= get_index_only_read_time(param,found_records,keynr) + cpu_cost; @@ -4975,8 +4978,8 @@ check_quick_select(PARAM *param,uint idx,SEL_ARG *tree) Clustered PK scan is a special case, check_quick_keys doesn't recognize CPK scans as ROR scans (while actually any CPK scan is a ROR scan). */ - cpk_scan= (param->table->primary_key == param->real_keynr[idx]) && - param->table->file->primary_key_is_clustered(); + cpk_scan= ((param->table->s->primary_key == param->real_keynr[idx]) && + param->table->file->primary_key_is_clustered()); param->is_ror_scan= !cpk_scan; } @@ -5237,12 +5240,13 @@ static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts) { KEY *table_key= param->table->key_info + keynr; KEY_PART_INFO *key_part= table_key->key_part + nparts; - KEY_PART_INFO *key_part_end= table_key->key_part + - table_key->key_parts; + KEY_PART_INFO *key_part_end= (table_key->key_part + + table_key->key_parts); + uint pk_number; if (key_part == key_part_end) return TRUE; - uint pk_number= param->table->primary_key; + pk_number= param->table->s->primary_key; if (!param->table->file->primary_key_is_clustered() || pk_number == MAX_KEY) return FALSE; @@ -5947,7 +5951,7 @@ int QUICK_RANGE_SELECT::get_next_init(void) if (file->table_flags() & HA_NEED_READ_RANGE_BUFFER) { mrange_bufsiz= min(multi_range_bufsiz, - (QUICK_SELECT_I::records + 1)* head->reclength); + (QUICK_SELECT_I::records + 1)* head->s->reclength); while (mrange_bufsiz && ! 
my_multi_malloc(MYF(MY_WME), @@ -6795,7 +6799,6 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) ORDER *tmp_group; Item *item; Item_field *item_field; - DBUG_ENTER("get_best_group_min_max"); /* Perform few 'cheap' tests whether this access method is applicable. */ @@ -6805,7 +6808,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) ((!join->group_list) && /* Neither GROUP BY nor a DISTINCT query. */ (!join->select_distinct))) DBUG_RETURN(NULL); - if(table->keys == 0) /* There are no indexes to use. */ + if (table->s->keys == 0) /* There are no indexes to use. */ DBUG_RETURN(NULL); /* Analyze the query in more detail. */ @@ -6863,7 +6866,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree) first one. Here we set the variables: group_prefix_len and index_info. */ KEY *cur_index_info= table->key_info; - KEY *cur_index_info_end= cur_index_info + table->keys; + KEY *cur_index_info_end= cur_index_info + table->s->keys; KEY_PART_INFO *cur_part= NULL; KEY_PART_INFO *end_part; /* Last part for loops. */ /* Last index part. */ @@ -8279,21 +8282,26 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() (cur_range->flag & (EQ_RANGE | NULL_RANGE))) continue; /* Check the next range. */ else if (result) - /* - In all other cases (HA_ERR_*, HA_READ_KEY_EXACT with NO_MIN_RANGE, - HA_READ_AFTER_KEY, HA_READ_KEY_OR_NEXT) if the lookup failed for this - range, it can't succeed for any other subsequent range. - */ + { + /* + In all other cases (HA_ERR_*, HA_READ_KEY_EXACT with NO_MIN_RANGE, + HA_READ_AFTER_KEY, HA_READ_KEY_OR_NEXT) if the lookup failed for this + range, it can't succeed for any other subsequent range. + */ break; + } /* A key was found. */ if (cur_range->flag & EQ_RANGE) break; /* No need to perform the checks below for equal keys. */ if (cur_range->flag & NULL_RANGE) - { /* Remember this key, and continue looking for a non-NULL key that */ - /* satisfies some other condition. 
*/ - memcpy(tmp_record, record, head->rec_buff_length); + { + /* + Remember this key, and continue looking for a non-NULL key that + satisfies some other condition. + */ + memcpy(tmp_record, record, head->s->rec_buff_length); found_null= TRUE; continue; } @@ -8334,7 +8342,7 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() */ if (found_null && result) { - memcpy(record, tmp_record, head->rec_buff_length); + memcpy(record, tmp_record, head->s->rec_buff_length); result= 0; } return result; diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 80226dcfa2c..ef9babf7713 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -644,7 +644,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, uint idx= 0; KEY *keyinfo,*keyinfo_end; - for (keyinfo= table->key_info, keyinfo_end= keyinfo+table->keys ; + for (keyinfo= table->key_info, keyinfo_end= keyinfo+table->s->keys ; keyinfo != keyinfo_end; keyinfo++,idx++) { diff --git a/sql/records.cc b/sql/records.cc index 3c0143d2307..9a506cadf0c 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -26,7 +26,7 @@ static int rr_unpack_from_tempfile(READ_RECORD *info); static int rr_unpack_from_buffer(READ_RECORD *info); static int rr_from_pointers(READ_RECORD *info); static int rr_from_cache(READ_RECORD *info); -static int init_rr_cache(READ_RECORD *info); +static int init_rr_cache(THD *thd, READ_RECORD *info); static int rr_cmp(uchar *a,uchar *b); /* init struct for read with info->read_record */ @@ -84,14 +84,14 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, !(table->file->table_flags() & HA_FAST_KEY_READ) && (table->db_stat & HA_READ_ONLY || table->reginfo.lock_type <= TL_READ_NO_INSERT) && - (ulonglong) table->reclength*(table->file->records+ - table->file->deleted) > + (ulonglong) table->s->reclength* (table->file->records+ + table->file->deleted) > (ulonglong) MIN_FILE_LENGTH_TO_USE_ROW_CACHE && - info->io_cache->end_of_file/info->ref_length*table->reclength > + 
info->io_cache->end_of_file/info->ref_length * table->s->reclength > (my_off_t) MIN_ROWS_TO_USE_TABLE_CACHE && - !table->blob_fields) + !table->s->blob_fields) { - if (! init_rr_cache(info)) + if (! init_rr_cache(thd, info)) { DBUG_PRINT("info",("using rr_from_cache")); info->read_record=rr_from_cache; @@ -133,7 +133,7 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, if (!table->no_cache && (use_record_cache > 0 || (int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY || - !(table->db_options_in_use & HA_OPTION_PACK_RECORD) || + !(table->s->db_options_in_use & HA_OPTION_PACK_RECORD) || (use_record_cache < 0 && !(table->file->table_flags() & HA_NOT_DELETE_WITH_CACHE)))) VOID(table->file->extra_opt(HA_EXTRA_CACHE, @@ -329,23 +329,21 @@ static int rr_unpack_from_buffer(READ_RECORD *info) } /* cacheing of records from a database */ -static int init_rr_cache(READ_RECORD *info) +static int init_rr_cache(THD *thd, READ_RECORD *info) { uint rec_cache_size; - THD *thd= current_thd; - DBUG_ENTER("init_rr_cache"); - info->struct_length=3+MAX_REFLENGTH; - info->reclength=ALIGN_SIZE(info->table->reclength+1); + info->struct_length= 3+MAX_REFLENGTH; + info->reclength= ALIGN_SIZE(info->table->s->reclength+1); if (info->reclength < info->struct_length) - info->reclength=ALIGN_SIZE(info->struct_length); + info->reclength= ALIGN_SIZE(info->struct_length); - info->error_offset=info->table->reclength; - info->cache_records= thd->variables.read_rnd_buff_size / - (info->reclength+info->struct_length); - rec_cache_size=info->cache_records*info->reclength; - info->rec_cache_size=info->cache_records*info->ref_length; + info->error_offset= info->table->s->reclength; + info->cache_records= (thd->variables.read_rnd_buff_size / + (info->reclength+info->struct_length)); + rec_cache_size= info->cache_records*info->reclength; + info->rec_cache_size= info->cache_records*info->ref_length; // We have to allocate one more byte to use uint3korr (see comments for it) if 
(info->cache_records <= 2 || @@ -385,7 +383,8 @@ static int rr_from_cache(READ_RECORD *info) else { error=0; - memcpy(info->record,info->cache_pos,(size_t) info->table->reclength); + memcpy(info->record,info->cache_pos, + (size_t) info->table->s->reclength); } info->cache_pos+=info->reclength; return ((int) error); diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc index 7852993b95b..de4ad83fdbb 100644 --- a/sql/repl_failsafe.cc +++ b/sql/repl_failsafe.cc @@ -739,7 +739,7 @@ static int fetch_db_tables(THD *thd, MYSQL *mysql, const char *db, { bzero((char*) &table, sizeof(table)); //just for safe table.db= (char*) db; - table.real_name= (char*) table_name; + table.table_name= (char*) table_name; table.updating= 1; if (!tables_ok(thd, &table)) diff --git a/sql/set_var.cc b/sql/set_var.cc index 94f3b020802..2bbfdac132d 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -3060,7 +3060,7 @@ bool sys_var_thd_storage_engine::check(THD *thd, set_var *var) enum db_type db_type; if (!(res=var->value->val_str(&str)) || !(var->save_result.ulong_value= - (ulong) db_type= ha_resolve_by_name(res->ptr(), res->length())) || + (ulong) (db_type= ha_resolve_by_name(res->ptr(), res->length()))) || ha_checktype(db_type) != db_type) { value= res ? res->c_ptr() : "NULL"; diff --git a/sql/slave.cc b/sql/slave.cc index 6b8559859fc..0bcc1b7e852 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -893,7 +893,7 @@ bool tables_ok(THD* thd, TABLE_LIST* tables) some_tables_updating= 1; end= strmov(hash_key, tables->db ? 
tables->db : thd->db); *end++= '.'; - len= (uint) (strmov(end, tables->real_name) - hash_key); + len= (uint) (strmov(end, tables->table_name) - hash_key); if (do_table_inited) // if there are any do's { if (hash_search(&replicate_do_table, (byte*) hash_key, len)) @@ -1520,7 +1520,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, bzero((char*) &tables,sizeof(tables)); tables.db = (char*)db; - tables.alias= tables.real_name= (char*)table_name; + tables.alias= tables.table_name= (char*)table_name; /* Drop the table if 'overwrite' is true */ if (overwrite && mysql_rm_table(thd,&tables,1,0)) /* drop if exists */ @@ -1580,7 +1580,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, error=file->repair(thd,&check_opt) != 0; thd->net.vio = save_vio; if (error) - my_error(ER_INDEX_REBUILD, MYF(0), tables.table->real_name); + my_error(ER_INDEX_REBUILD, MYF(0), tables.table->s->table_name); err: close_thread_tables(thd); diff --git a/sql/sp.cc b/sql/sp.cc index 65dad60cda7..84b126e5ecd 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -97,8 +97,8 @@ db_find_routine_aux(THD *thd, int type, sp_name *name, else { for (table= thd->open_tables ; table ; table= table->next) - if (strcmp(table->table_cache_key, "mysql") == 0 && - strcmp(table->real_name, "proc") == 0) + if (strcmp(table->s->db, "mysql") == 0 && + strcmp(table->s->table_name, "proc") == 0) break; } if (table) @@ -109,7 +109,7 @@ db_find_routine_aux(THD *thd, int type, sp_name *name, memset(&tables, 0, sizeof(tables)); tables.db= (char*)"mysql"; - tables.real_name= tables.alias= (char*)"proc"; + tables.table_name= tables.alias= (char*)"proc"; if (! 
(table= open_ltable(thd, &tables, ltype))) { *tablep= NULL; @@ -158,7 +158,7 @@ db_find_routine(THD *thd, int type, sp_name *name, sp_head **sphp) if (ret != SP_OK) goto done; - if (table->fields != MYSQL_PROC_FIELD_COUNT) + if (table->s->fields != MYSQL_PROC_FIELD_COUNT) { ret= SP_GET_FIELD_FAILED; goto done; @@ -356,16 +356,16 @@ db_create_routine(THD *thd, int type, sp_head *sp) memset(&tables, 0, sizeof(tables)); tables.db= (char*)"mysql"; - tables.real_name= tables.alias= (char*)"proc"; + tables.table_name= tables.alias= (char*)"proc"; if (! (table= open_ltable(thd, &tables, TL_WRITE))) ret= SP_OPEN_TABLE_FAILED; else { - restore_record(table, default_values); // Get default values for fields + restore_record(table, s->default_values); // Get default values for fields strxmov(definer, thd->priv_user, "@", thd->priv_host, NullS); - if (table->fields != MYSQL_PROC_FIELD_COUNT) + if (table->s->fields != MYSQL_PROC_FIELD_COUNT) { ret= SP_GET_FIELD_FAILED; goto done; @@ -562,7 +562,7 @@ db_show_routine_status(THD *thd, int type, const char *wild) memset(&tables, 0, sizeof(tables)); tables.db= (char*)"mysql"; - tables.real_name= tables.alias= (char*)"proc"; + tables.table_name= tables.alias= (char*)"proc"; if (! (table= open_ltable(thd, &tables, TL_READ))) { @@ -668,8 +668,8 @@ sp_drop_db_routines(THD *thd, char *db) keylen= sizeof(key); for (table= thd->open_tables ; table ; table= table->next) - if (strcmp(table->table_cache_key, "mysql") == 0 && - strcmp(table->real_name, "proc") == 0) + if (strcmp(table->s->db, "mysql") == 0 && + strcmp(table->s->table_name, "proc") == 0) break; if (! table) { @@ -677,7 +677,7 @@ sp_drop_db_routines(THD *thd, char *db) memset(&tables, 0, sizeof(tables)); tables.db= (char*)"mysql"; - tables.real_name= tables.alias= (char*)"proc"; + tables.table_name= tables.alias= (char*)"proc"; if (! 
(table= open_ltable(thd, &tables, TL_WRITE))) DBUG_RETURN(SP_OPEN_TABLE_FAILED); } @@ -749,9 +749,9 @@ sp_exists_routine(THD *thd, TABLE_LIST *tables, bool any, bool no_error) LEX_STRING lex_db; LEX_STRING lex_name; lex_db.length= strlen(table->db); - lex_name.length= strlen(table->real_name); + lex_name.length= strlen(table->table_name); lex_db.str= thd->strmake(table->db, lex_db.length); - lex_name.str= thd->strmake(table->real_name, lex_name.length); + lex_name.str= thd->strmake(table->table_name, lex_name.length); name= new sp_name(lex_db, lex_name); name->init_qname(thd); if (sp_find_procedure(thd, name) != NULL || @@ -766,7 +766,7 @@ sp_exists_routine(THD *thd, TABLE_LIST *tables, bool any, bool no_error) if (!no_error) { my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION or PROCEDURE", - table->real_name); + table->table_name); DBUG_RETURN(-1); } DBUG_RETURN(0); diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 9afc0c04631..3f2969768c5 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -834,10 +834,10 @@ sp_head::restore_lex(THD *thd) char **tb; while ((tb= li++)) - if (my_strcasecmp(system_charset_info, tables->real_name, *tb) == 0) + if (my_strcasecmp(system_charset_info, tables->table_name, *tb) == 0) break; if (! 
tb) - m_tables.push_back(&tables->real_name); + m_tables.push_back(&tables->table_name); } } #endif diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index ba00c1d3fa7..b607f8a7822 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -166,9 +166,9 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) thd->db= my_strdup("mysql",MYF(0)); thd->db_length=5; // Safety bzero((char*) &tables,sizeof(tables)); - tables[0].alias=tables[0].real_name=(char*) "host"; - tables[1].alias=tables[1].real_name=(char*) "user"; - tables[2].alias=tables[2].real_name=(char*) "db"; + tables[0].alias=tables[0].table_name=(char*) "host"; + tables[1].alias=tables[1].table_name=(char*) "user"; + tables[2].alias=tables[2].table_name=(char*) "db"; tables[0].next_local= tables[0].next_global= tables+1; tables[1].next_local= tables[1].next_global= tables+2; tables[0].lock_type=tables[1].lock_type=tables[2].lock_type=TL_READ; @@ -210,7 +210,7 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) continue; } #ifndef TO_BE_REMOVED - if (table->fields == 8) + if (table->s->fields == 8) { // Without grant if (host.access & CREATE_ACL) host.access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_TMP_ACL; @@ -233,7 +233,7 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) } DBUG_PRINT("info",("user table fields: %d, password length: %d", - table->fields, table->field[2]->field_length)); + table->s->fields, table->field[2]->field_length)); pthread_mutex_lock(&LOCK_global_system_variables); if (table->field[2]->field_length < SCRAMBLED_PASSWORD_CHAR_LENGTH) @@ -306,23 +306,24 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) if it is pre 5.0.1 privilege table then map CREATE privilege on CREATE VIEW & SHOW VIEW privileges */ - if (table->fields <= 31 && (user.access & CREATE_ACL)) + if (table->s->fields <= 31 && (user.access & CREATE_ACL)) user.access|= (CREATE_VIEW_ACL | SHOW_VIEW_ACL); /* if it is pre 5.0.2 privilege table then map CREATE/ALTER privilege on CREATE PROCEDURE 
& ALTER PROCEDURE privileges */ - if (table->fields <= 33 && (user.access & CREATE_ACL)) + if (table->s->fields <= 33 && (user.access & CREATE_ACL)) user.access|= CREATE_PROC_ACL; - if (table->fields <= 33 && (user.access & ALTER_ACL)) + if (table->s->fields <= 33 && (user.access & ALTER_ACL)) user.access|= ALTER_PROC_ACL; user.sort= get_sort(2,user.host.hostname,user.user); user.hostname_length= (user.host.hostname ? (uint) strlen(user.host.hostname) : 0); - if (table->fields >= 31) /* Starting from 4.0.2 we have more fields */ + /* Starting from 4.0.2 we have more fields */ + if (table->s->fields >= 31) { char *ssl_type=get_field(&mem, table->field[next_field++]); if (!ssl_type) @@ -348,7 +349,7 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) user.user_resource.conn_per_hour) mqh_used=1; - if (table->fields >= 36) + if (table->s->fields >= 36) { /* Starting from 5.0.3 we have max_user_connections field */ ptr= get_field(&mem, table->field[next_field++]); @@ -362,7 +363,7 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) user.ssl_type=SSL_TYPE_NONE; bzero((char *)&(user.user_resource),sizeof(user.user_resource)); #ifndef TO_BE_REMOVED - if (table->fields <= 13) + if (table->s->fields <= 13) { // Without grant if (user.access & CREATE_ACL) user.access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL; @@ -410,7 +411,7 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) db.access=fix_rights_for_db(db.access); db.sort=get_sort(3,db.host.hostname,db.db,db.user); #ifndef TO_BE_REMOVED - if (table->fields <= 9) + if (table->s->fields <= 9) { // Without grant if (db.access & CREATE_ACL) db.access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL; @@ -1464,7 +1465,7 @@ static bool update_user_table(THD *thd, const char *host, const char *user, DBUG_PRINT("enter",("user: %s host: %s",user,host)); bzero((char*) &tables,sizeof(tables)); - tables.alias=tables.real_name=(char*) "user"; + tables.alias=tables.table_name=(char*) "user"; tables.db=(char*) "mysql"; 
#ifdef HAVE_REPLICATION @@ -1527,7 +1528,7 @@ static bool test_if_create_new_users(THD *thd) ulong db_access; bzero((char*) &tl,sizeof(tl)); tl.db= (char*) "mysql"; - tl.real_name= (char*) "user"; + tl.table_name= (char*) "user"; db_access=acl_get(thd->host, thd->ip, thd->priv_user, tl.db, 0); @@ -1607,7 +1608,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, goto end; } old_row_exists = 0; - restore_record(table,default_values); // cp empty row from default_values + restore_record(table,s->default_values); table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); table->field[1]->store(combo.user.str,combo.user.length, @@ -1654,8 +1655,8 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, (*tmp_field)->store(&what, 1, &my_charset_latin1); } rights= get_access(table, 3, &next_field); - DBUG_PRINT("info",("table->fields: %d",table->fields)); - if (table->fields >= 31) /* From 4.0.0 we have more fields */ + DBUG_PRINT("info",("table fields: %d",table->s->fields)); + if (table->s->fields >= 31) /* From 4.0.0 we have more fields */ { /* We write down SSL related ACL stuff */ switch (lex->ssl_type) { @@ -1704,7 +1705,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, table->field[next_field+1]->store((longlong) mqh.updates); if (mqh.specified_limits & USER_RESOURCES::CONNECTIONS_PER_HOUR) table->field[next_field+2]->store((longlong) mqh.conn_per_hour); - if (table->fields >= 36 && + if (table->s->fields >= 36 && (mqh.specified_limits & USER_RESOURCES::USER_CONNECTIONS)) table->field[next_field+3]->store((longlong) mqh.user_conn); mqh_used= mqh_used || mqh.questions || mqh.updates || mqh.conn_per_hour; @@ -1808,7 +1809,7 @@ static int replace_db_table(TABLE *table, const char *db, goto abort; } old_row_exists = 0; - restore_record(table,default_values); // cp empty row from default_values + restore_record(table, s->default_values); 
table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); table->field[1]->store(db,(uint) strlen(db), system_charset_info); table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); @@ -1820,7 +1821,7 @@ static int replace_db_table(TABLE *table, const char *db, } store_rights=get_rights_for_db(rights); - for (i= 3, priv= 1; i < table->fields; i++, priv <<= 1) + for (i= 3, priv= 1; i < table->s->fields; i++, priv <<= 1) { if (priv & store_rights) // do it if priv is chosen table->field [i]->store(&what,1, &my_charset_latin1);// set requested privileges @@ -2191,7 +2192,7 @@ static int replace_column_table(GRANT_TABLE *g_t, continue; /* purecov: inspected */ } old_row_exists = 0; - restore_record(table,default_values); // Get empty record + restore_record(table, s->default_values); // Get empty record key_restore(table->record[0],key,table->key_info, key_prefix_length); table->field[4]->store(xx->column.ptr(),xx->column.length(), @@ -2343,7 +2344,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, DBUG_RETURN(-1); /* purecov: deadcode */ } - restore_record(table,default_values); // Get empty record + restore_record(table, s->default_values); // Get empty record table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); table->field[1]->store(db,(uint) strlen(db), system_charset_info); table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); @@ -2464,7 +2465,7 @@ static int replace_proc_table(THD *thd, GRANT_NAME *grant_name, DBUG_RETURN(-1); } - restore_record(table,default_values); // Get empty record + restore_record(table, s->default_values); // Get empty record table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1); table->field[1]->store(db,(uint) strlen(db), &my_charset_latin1); table->field[2]->store(combo.user.str,combo.user.length, &my_charset_latin1); @@ -2573,7 +2574,7 @@ bool mysql_table_grant(THD *thd, TABLE_LIST *table_list, 
LEX_USER *Str; TABLE_LIST tables[3]; bool create_new_users=0; - char *db_name, *real_name; + char *db_name, *table_name; DBUG_ENTER("mysql_table_grant"); if (!initialized) @@ -2625,7 +2626,7 @@ bool mysql_table_grant(THD *thd, TABLE_LIST *table_list, { char buf[FN_REFLEN]; sprintf(buf,"%s/%s/%s.frm",mysql_data_home, table_list->db, - table_list->real_name); + table_list->table_name); fn_format(buf,buf,"","",4+16+32); if (access(buf,F_OK)) { @@ -2648,9 +2649,9 @@ bool mysql_table_grant(THD *thd, TABLE_LIST *table_list, /* open the mysql.tables_priv and mysql.columns_priv tables */ bzero((char*) &tables,sizeof(tables)); - tables[0].alias=tables[0].real_name= (char*) "user"; - tables[1].alias=tables[1].real_name= (char*) "tables_priv"; - tables[2].alias=tables[2].real_name= (char*) "columns_priv"; + tables[0].alias=tables[0].table_name= (char*) "user"; + tables[1].alias=tables[1].table_name= (char*) "tables_priv"; + tables[2].alias=tables[2].table_name= (char*) "columns_priv"; tables[0].next_local= tables[0].next_global= tables+1; /* Don't open column table if we don't need it ! */ tables[1].next_local= @@ -2717,24 +2718,24 @@ bool mysql_table_grant(THD *thd, TABLE_LIST *table_list, db_name= (table_list->view_db.length ? table_list->view_db.str : table_list->db); - real_name= (table_list->view_name.length ? + table_name= (table_list->view_name.length ? 
table_list->view_name.str : - table_list->real_name); + table_list->table_name); /* Find/create cached table grant */ grant_table= table_hash_search(Str->host.str, NullS, db_name, - Str->user.str, real_name, 1); + Str->user.str, table_name, 1); if (!grant_table) { if (revoke_grant) { my_error(ER_NONEXISTING_TABLE_GRANT, MYF(0), - Str->user.str, Str->host.str, table_list->real_name); + Str->user.str, Str->host.str, table_list->table_name); result= TRUE; continue; } grant_table = new GRANT_TABLE (Str->host.str, db_name, - Str->user.str, real_name, + Str->user.str, table_name, rights, column_priv); if (!grant_table) // end of memory @@ -2780,7 +2781,7 @@ bool mysql_table_grant(THD *thd, TABLE_LIST *table_list, /* update table and columns */ if (replace_table_table(thd, grant_table, tables[1].table, *Str, - db_name, real_name, + db_name, table_name, rights, column_priv, revoke_grant)) { /* Should only happen if table is crashed */ @@ -2790,7 +2791,7 @@ bool mysql_table_grant(THD *thd, TABLE_LIST *table_list, { if ((replace_column_table(grant_table, tables[2].table, *Str, columns, - db_name, real_name, + db_name, table_name, rights, revoke_grant))) { result= TRUE; @@ -2831,7 +2832,7 @@ bool mysql_procedure_grant(THD *thd, TABLE_LIST *table_list, LEX_USER *Str; TABLE_LIST tables[2]; bool create_new_users=0, result=0; - char *db_name, *real_name; + char *db_name, *table_name; DBUG_ENTER("mysql_procedure_grant"); if (!initialized) @@ -2858,8 +2859,8 @@ bool mysql_procedure_grant(THD *thd, TABLE_LIST *table_list, /* open the mysql.user and mysql.procs_priv tables */ bzero((char*) &tables,sizeof(tables)); - tables[0].alias=tables[0].real_name= (char*) "user"; - tables[1].alias=tables[1].real_name= (char*) "procs_priv"; + tables[0].alias=tables[0].table_name= (char*) "user"; + tables[1].alias=tables[1].table_name= (char*) "procs_priv"; tables[0].next_local= tables[0].next_global= tables+1; tables[0].lock_type=tables[1].lock_type=TL_WRITE; tables[0].db=tables[1].db=(char*) 
"mysql"; @@ -2920,22 +2921,22 @@ bool mysql_procedure_grant(THD *thd, TABLE_LIST *table_list, } db_name= table_list->db; - real_name= table_list->real_name; + table_name= table_list->table_name; grant_name= proc_hash_search(Str->host.str, NullS, db_name, - Str->user.str, real_name, 1); + Str->user.str, table_name, 1); if (!grant_name) { if (revoke_grant) { if (!no_error) my_error(ER_NONEXISTING_PROC_GRANT, MYF(0), - Str->user.str, Str->host.str, real_name); + Str->user.str, Str->host.str, table_name); result= TRUE; continue; } grant_name= new GRANT_NAME(Str->host.str, db_name, - Str->user.str, real_name, + Str->user.str, table_name, rights); if (!grant_name) { @@ -2946,7 +2947,7 @@ bool mysql_procedure_grant(THD *thd, TABLE_LIST *table_list, } if (replace_proc_table(thd, grant_name, tables[1].table, *Str, - db_name, real_name, rights, revoke_grant)) + db_name, table_name, rights, revoke_grant)) { result= TRUE; continue; @@ -2987,8 +2988,8 @@ bool mysql_grant(THD *thd, const char *db, List &list, /* open the mysql.user and mysql.db tables */ bzero((char*) &tables,sizeof(tables)); - tables[0].alias=tables[0].real_name=(char*) "user"; - tables[1].alias=tables[1].real_name=(char*) "db"; + tables[0].alias=tables[0].table_name=(char*) "user"; + tables[1].alias=tables[1].table_name=(char*) "db"; tables[0].next_local= tables[0].next_global= tables+1; tables[0].lock_type=tables[1].lock_type=TL_WRITE; tables[0].db=tables[1].db=(char*) "mysql"; @@ -3112,9 +3113,9 @@ my_bool grant_init(THD *org_thd) thd->db= my_strdup("mysql",MYF(0)); thd->db_length=5; // Safety bzero((char*) &tables, sizeof(tables)); - tables[0].alias=tables[0].real_name= (char*) "tables_priv"; - tables[1].alias=tables[1].real_name= (char*) "columns_priv"; - tables[2].alias=tables[2].real_name= (char*) "procs_priv"; + tables[0].alias=tables[0].table_name= (char*) "tables_priv"; + tables[1].alias=tables[1].table_name= (char*) "columns_priv"; + tables[2].alias=tables[2].table_name= (char*) "procs_priv"; 
tables[0].next_local= tables[0].next_global= tables+1; tables[1].next_local= tables[1].next_global= tables+2; tables[0].lock_type=tables[1].lock_type=tables[2].lock_type=TL_READ; @@ -3317,7 +3318,7 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, { GRANT_TABLE *grant_table; if (!(~table->grant.privilege & want_access) || - table->derived || table->schema_table) + table->derived || table->schema_table || table->belong_to_view) { /* It is subquery in the FROM clause. VIEW set table->derived after @@ -3327,7 +3328,7 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, continue; // Already checked } if (!(grant_table= table_hash_search(thd->host,thd->ip, - table->db,user, table->real_name,0))) + table->db,user, table->table_name,0))) { want_access &= ~table->grant.privilege; goto err; // No grants @@ -3363,14 +3364,14 @@ err: command, thd->priv_user, thd->host_or_ip, - table ? table->real_name : "unknown"); + table ? table->table_name : "unknown"); } DBUG_RETURN(1); } bool check_grant_column(THD *thd, GRANT_INFO *grant, - char *db_name, char *table_name, + const char *db_name, const char *table_name, const char *name, uint length, uint show_tables) { GRANT_TABLE *grant_table; @@ -3429,7 +3430,7 @@ err: bool check_grant_all_columns(THD *thd, ulong want_access, GRANT_INFO *grant, - char* db_name, char *table_name, + const char* db_name, const char *table_name, Field_iterator *fields) { GRANT_TABLE *grant_table; @@ -3552,7 +3553,7 @@ bool check_grant_procedure(THD *thd, ulong want_access, { GRANT_NAME *grant_proc; if ((grant_proc= proc_hash_search(host,thd->ip, - table->db, user, table->real_name, 0))) + table->db, user, table->table_name, 0))) table->grant.privilege|= grant_proc->privs; if (want_access & ~table->grant.privilege) @@ -3570,7 +3571,7 @@ err: char buff[1024]; const char *command=""; if (table) - strxmov(buff, table->db, ".", table->real_name, NullS); + strxmov(buff, table->db, ".", table->table_name, NullS); if 
(want_access & EXECUTE_ACL) command= "execute"; else if (want_access & ALTER_PROC_ACL) @@ -3600,7 +3601,7 @@ ulong get_table_grant(THD *thd, TABLE_LIST *table) grant_table= NULL; #else grant_table= table_hash_search(thd->host, thd->ip, db, user, - table->real_name, 0); + table->table_name, 0); #endif table->grant.grant_table=grant_table; // Remember for column test table->grant.version=grant_version; @@ -4169,11 +4170,11 @@ int open_grant_tables(THD *thd, TABLE_LIST *tables) } bzero((char*) tables, GRANT_TABLES*sizeof(*tables)); - tables->alias= tables->real_name= (char*) "user"; - (tables+1)->alias= (tables+1)->real_name= (char*) "db"; - (tables+2)->alias= (tables+2)->real_name= (char*) "tables_priv"; - (tables+3)->alias= (tables+3)->real_name= (char*) "columns_priv"; - (tables+4)->alias= (tables+4)->real_name= (char*) "procs_priv"; + tables->alias= tables->table_name= (char*) "user"; + (tables+1)->alias= (tables+1)->table_name= (char*) "db"; + (tables+2)->alias= (tables+2)->table_name= (char*) "tables_priv"; + (tables+3)->alias= (tables+3)->table_name= (char*) "columns_priv"; + (tables+4)->alias= (tables+4)->table_name= (char*) "procs_priv"; tables->next_local= tables->next_global= tables+1; (tables+1)->next_local= (tables+1)->next_global= tables+2; (tables+2)->next_local= (tables+2)->next_global= tables+3; @@ -4347,7 +4348,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, by the searched record, if it exists. 
*/ DBUG_PRINT("info",("read table: '%s' search: '%s'@'%s'", - table->real_name, user_str, host_str)); + table->s->table_name, user_str, host_str)); host_field->store(host_str, user_from->host.length, system_charset_info); user_field->store(user_str, user_from->user.length, system_charset_info); @@ -4390,7 +4391,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, { #ifdef EXTRA_DEBUG DBUG_PRINT("info",("scan table: '%s' search: '%s'@'%s'", - table->real_name, user_str, host_str)); + table->s->table_name, user_str, host_str)); #endif while ((error= table->file->rnd_next(table->record[0])) != HA_ERR_END_OF_FILE) @@ -4439,7 +4440,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, SYNOPSIS handle_grant_struct() - struct_no The number of the structure to handle (0..2). + struct_no The number of the structure to handle (0..3). drop If user_from is to be dropped. user_from The the user to be searched/dropped/renamed. user_to The new name for the user if to be renamed, @@ -4460,6 +4461,7 @@ static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, RETURN > 0 At least one element matched. 0 OK, but no element matched. + -1 Wrong arguments to function */ static int handle_grant_struct(uint struct_no, bool drop, @@ -4481,8 +4483,7 @@ static int handle_grant_struct(uint struct_no, bool drop, struct_no, user_from->user.str, user_from->host.str)); /* Get the number of elements in the in-memory structure. */ - switch (struct_no) - { + switch (struct_no) { case 0: elements= acl_users.elements; break; @@ -4496,7 +4497,6 @@ static int handle_grant_struct(uint struct_no, bool drop, elements= proc_priv_hash.records; break; default: - DBUG_ASSERT((struct_no < 0) || (struct_no > 3)); return -1; } @@ -4511,8 +4511,7 @@ static int handle_grant_struct(uint struct_no, bool drop, Get a pointer to the element. Unfortunaltely, the host default differs for the structures. 
*/ - switch (struct_no) - { + switch (struct_no) { case 0: acl_user= dynamic_element(&acl_users, idx, ACL_USER*); user= acl_user->user; @@ -4539,9 +4538,9 @@ static int handle_grant_struct(uint struct_no, bool drop, break; } if (! user) - user= ""; + user= ""; if (! host) - host= ""; + host= ""; #ifdef EXTRA_DEBUG DBUG_PRINT("loop",("scan struct: %u index: %u user: '%s' host: '%s'", struct_no, idx, user, host)); @@ -4576,8 +4575,7 @@ static int handle_grant_struct(uint struct_no, bool drop, } else if ( user_to ) { - switch ( struct_no ) - { + switch ( struct_no ) { case 0: acl_user->user= strdup_root(&mem, user_to->user.str); acl_user->host.hostname= strdup_root(&mem, user_to->host.str); @@ -5186,7 +5184,7 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name) user_list.empty(); tables->db= (char*)sp_db; - tables->real_name= tables->alias= (char*)sp_name; + tables->table_name= tables->alias= (char*)sp_name; combo->host.length= strlen(combo->host.str); combo->user.length= strlen(combo->user.str); @@ -5271,7 +5269,7 @@ void update_schema_privilege(TABLE *table, char *buff, const char* db, { int i= 2; CHARSET_INFO *cs= system_charset_info; - restore_record(table, default_values); + restore_record(table, s->default_values); table->field[0]->store(buff, strlen(buff), cs); if (db) table->field[i++]->store(db, strlen(db), cs); diff --git a/sql/sql_acl.h b/sql/sql_acl.h index 4ebc3ad7707..b129ffcdcf8 100644 --- a/sql/sql_acl.h +++ b/sql/sql_acl.h @@ -193,10 +193,10 @@ void grant_reload(THD *thd); bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, uint show_command, uint number, bool dont_print_error); bool check_grant_column (THD *thd, GRANT_INFO *grant, - char *db_name, char *table_name, + const char *db_name, const char *table_name, const char *name, uint length, uint show_command=0); bool check_grant_all_columns(THD *thd, ulong want_access, GRANT_INFO *grant, - char* db_name, char *table_name, + const char* db_name, const char 
*table_name, Field_iterator *fields); bool check_grant_procedure(THD *thd, ulong want_access, TABLE_LIST *procs, bool no_error); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index b6510e53679..405d95bf456 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -49,8 +49,8 @@ extern "C" byte *table_cache_key(const byte *record,uint *length, my_bool not_used __attribute__((unused))) { TABLE *entry=(TABLE*) record; - *length=entry->key_length; - return (byte*) entry->table_cache_key; + *length= entry->s->key_length; + return (byte*) entry->s->table_cache_key; } bool table_cache_init(void) @@ -148,20 +148,21 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild) { OPEN_TABLE_LIST *table; TABLE *entry=(TABLE*) hash_element(&open_cache,idx); + TABLE_SHARE *share= entry->s; - DBUG_ASSERT(entry->real_name); - if ((!entry->real_name)) // To be removed + DBUG_ASSERT(share->table_name); + if ((!share->table_name)) // To be removed continue; // Shouldn't happen if (wild) { - strxmov(name,entry->table_cache_key,".",entry->real_name,NullS); + strxmov(name,share->table_cache_key,".",share->table_name,NullS); if (wild_compare(name,wild,0)) continue; } /* Check if user has SELECT privilege for any column in the table */ - table_list.db= (char*) entry->table_cache_key; - table_list.real_name= entry->real_name; + table_list.db= (char*) share->db; + table_list.table_name= (char*) share->table_name; table_list.grant.privilege=0; if (check_table_access(thd,SELECT_ACL | EXTRA_ACL,&table_list,1)) @@ -169,8 +170,8 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild) /* need to check if we haven't already listed it */ for (table= open_list ; table ; table=table->next) { - if (!strcmp(table->table,entry->real_name) && - !strcmp(table->db,entry->table_cache_key)) + if (!strcmp(table->table,share->table_name) && + !strcmp(table->db,entry->s->db)) { if (entry->in_use) table->in_use++; @@ -182,15 +183,15 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild) if 
(table) continue; if (!(*start_list = (OPEN_TABLE_LIST *) - sql_alloc(sizeof(**start_list)+entry->key_length))) + sql_alloc(sizeof(**start_list)+share->key_length))) { open_list=0; // Out of memory break; } strmov((*start_list)->table= strmov(((*start_list)->db= (char*) ((*start_list)+1)), - entry->table_cache_key)+1, - entry->real_name); + entry->s->db)+1, + entry->s->table_name); (*start_list)->in_use= entry->in_use ? 1 : 0; (*start_list)->locked= entry->locked_by_name ? 1 : 0; start_list= &(*start_list)->next; @@ -287,7 +288,7 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, bool found=0; for (TABLE_LIST *table= tables; table; table= table->next_local) { - if (remove_table_from_cache(thd, table->db, table->real_name, 1)) + if (remove_table_from_cache(thd, table->db, table->table_name, 1)) found=1; } if (!found) @@ -319,7 +320,7 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, for (uint idx=0 ; idx < open_cache.records ; idx++) { TABLE *table=(TABLE*) hash_element(&open_cache,idx); - if ((table->version) < refresh_version && table->db_stat) + if ((table->s->version) < refresh_version && table->db_stat) { found=1; pthread_cond_wait(&COND_refresh,&LOCK_open); @@ -337,7 +338,7 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, thd->in_lock_tables=0; /* Set version for table */ for (TABLE *table=thd->open_tables; table ; table= table->next) - table->version=refresh_version; + table->s->version= refresh_version; } VOID(pthread_mutex_unlock(&LOCK_open)); if (if_wait_for_refresh) @@ -439,7 +440,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr) DBUG_ASSERT(table->file->inited == handler::NONE); *table_ptr=table->next; - if (table->version != refresh_version || + if (table->s->version != refresh_version || thd->version != refresh_version || !table->db_stat) { VOID(hash_delete(&open_cache,(byte*) table)); @@ -447,9 +448,9 @@ bool close_thread_table(THD *thd, TABLE **table_ptr) } else { - if (table->flush_version != 
flush_version) + if (table->s->flush_version != flush_version) { - table->flush_version=flush_version; + table->s->flush_version= flush_version; table->file->extra(HA_EXTRA_FLUSH); } else @@ -477,8 +478,8 @@ void close_temporary(TABLE *table,bool delete_table) { DBUG_ENTER("close_temporary"); char path[FN_REFLEN]; - db_type table_type=table->db_type; - strmov(path,table->path); + db_type table_type=table->s->db_type; + strmov(path,table->s->path); free_io_cache(table); closefrm(table); my_free((char*) table,MYF(0)); @@ -507,7 +508,7 @@ void close_temporary_tables(THD *thd) enough; indeed it is enough, because table->key_length is greater (by 8, because of server_id and thread_id) than db||table. */ - query_buf_size+= table->key_length+1; + query_buf_size+= table->s->key_length+1; if ((query = alloc_root(thd->mem_root, query_buf_size))) // Better add "if exists", in case a RESET MASTER has been done @@ -518,14 +519,10 @@ void close_temporary_tables(THD *thd) if (query) // we might be out of memory, but this is not fatal { // skip temporary tables not created directly by the user - if (table->real_name[0] != '#') + if (table->s->table_name[0] != '#') found_user_tables = 1; - /* - Here we assume table_cache_key always starts - with \0 terminated db name - */ - end = strxmov(end,"`",table->table_cache_key,"`.`", - table->real_name,"`,", NullS); + end = strxmov(end,"`",table->s->db,"`.`", + table->s->table_name,"`,", NullS); } next=table->next; close_temporary(table); @@ -579,14 +576,14 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table, { for (; table; table= *(TABLE_LIST **) ((char*) table + offset)) { - if ((table->table == 0 || table->table->tmp_table == NO_TMP_TABLE) && + if ((table->table == 0 || table->table->s->tmp_table == NO_TMP_TABLE) && ((!strcmp(table->db, db_name) && - !strcmp(table->real_name, table_name)) || + !strcmp(table->table_name, table_name)) || (table->view && !my_strcasecmp(table_alias_charset, - table->table->table_cache_key, db_name) && + 
table->db, db_name) && !my_strcasecmp(table_alias_charset, - table->table->table_name, table_name)))) + table->table->alias, table_name)))) break; } } @@ -594,12 +591,12 @@ TABLE_LIST *find_table_in_list(TABLE_LIST *table, { for (; table; table= *(TABLE_LIST **) ((char*) table + offset)) { - if ((table->table == 0 || table->table->tmp_table == NO_TMP_TABLE) && + if ((table->table == 0 || table->table->s->tmp_table == NO_TMP_TABLE) && ((!strcmp(table->db, db_name) && - !strcmp(table->real_name, table_name)) || + !strcmp(table->table_name, table_name)) || (table->view && - !strcmp(table->table->table_cache_key, db_name) && - !strcmp(table->table->table_name, table_name)))) + !strcmp(table->table->s->db, db_name) && + !strcmp(table->table->alias, table_name)))) break; } } @@ -625,27 +622,27 @@ TABLE_LIST* unique_table(TABLE_LIST *table, TABLE_LIST *table_list) DBUG_ENTER("unique_table"); DBUG_PRINT("enter", ("table alias: %s", table->alias)); TABLE_LIST *res; - const char *d_name= table->db, *t_name= table->real_name; + const char *d_name= table->db, *t_name= table->table_name; char d_name_buff[MAX_ALIAS_NAME], t_name_buff[MAX_ALIAS_NAME]; /* temporary table is always unique */ - if (table->table && table->table->tmp_table != NO_TMP_TABLE) + if (table->table && table->table->s->tmp_table != NO_TMP_TABLE) return 0; if (table->view) { /* it is view and table opened */ if (lower_case_table_names) { - strmov(t_name_buff, table->table->table_name); + strmov(t_name_buff, table->table->alias); my_casedn_str(files_charset_info, t_name_buff); t_name= t_name_buff; - strmov(d_name_buff, table->table->table_cache_key); + strmov(d_name_buff, table->table->s->db); my_casedn_str(files_charset_info, d_name_buff); d_name= d_name_buff; } else { - d_name= table->table->table_cache_key; - t_name= table->table->table_name; + d_name= table->table->s->db; + t_name= table->table->alias; } } @@ -677,8 +674,8 @@ TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name) 
prev= &thd->temporary_tables; for (table=thd->temporary_tables ; table ; table=table->next) { - if (table->key_length == key_length && - !memcmp(table->table_cache_key,key,key_length)) + if (table->s->key_length == key_length && + !memcmp(table->s->table_cache_key,key,key_length)) return prev; prev= &table->next; } @@ -706,22 +703,26 @@ bool close_temporary_table(THD *thd, const char *db, const char *table_name) Prepares a table cache key, which is the concatenation of db, table_name and thd->slave_proxy_id, separated by '\0'. */ + bool rename_temporary_table(THD* thd, TABLE *table, const char *db, const char *table_name) { char *key; + TABLE_SHARE *share= table->s; + if (!(key=(char*) alloc_root(&table->mem_root, (uint) strlen(db)+ (uint) strlen(table_name)+6+4))) return 1; /* purecov: inspected */ - table->key_length=(uint) - (strmov((table->real_name=strmov(table->table_cache_key=key, - db)+1), - table_name) - table->table_cache_key)+1; - int4store(key+table->key_length,thd->server_id); - table->key_length += 4; - int4store(key+table->key_length,thd->variables.pseudo_thread_id); - table->key_length += 4; + share->key_length= (uint) + (strmov(((char*) share->table_name= strmov(share->table_cache_key= key, + db)+1), + table_name) - share->table_cache_key)+1; + share->db= share->table_cache_key; + int4store(key+share->key_length, thd->server_id); + share->key_length+= 4; + int4store(key+share->key_length, thd->variables.pseudo_thread_id); + share->key_length+= 4; return 0; } @@ -752,15 +753,16 @@ static void relink_unused(TABLE *table) TABLE *unlink_open_table(THD *thd, TABLE *list, TABLE *find) { char key[MAX_DBKEY_LENGTH]; - uint key_length=find->key_length; + uint key_length= find->s->key_length; TABLE *start=list,**prev,*next; prev= &start; - memcpy(key,find->table_cache_key,key_length); + + memcpy(key, find->s->table_cache_key, key_length); for (; list ; list=next) { next=list->next; - if (list->key_length == key_length && - 
!memcmp(list->table_cache_key,key,key_length)) + if (list->s->key_length == key_length && + !memcmp(list->s->table_cache_key, key, key_length)) { if (thd->locked_tables) mysql_lock_remove(thd, thd->locked_tables,list); @@ -811,12 +813,13 @@ TABLE *reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) DBUG_ENTER("reopen_name_locked_table"); if (thd->killed) DBUG_RETURN(0); - TABLE* table; + TABLE *table; + TABLE_SHARE *share; if (!(table = table_list->table)) DBUG_RETURN(0); char* db = thd->db ? thd->db : table_list->db; - char* table_name = table_list->real_name; + char* table_name = table_list->table_name; char key[MAX_DBKEY_LENGTH]; uint key_length; key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1; @@ -824,8 +827,8 @@ TABLE *reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) pthread_mutex_lock(&LOCK_open); if (open_unireg_entry(thd, table, db, table_name, table_name, 0, thd->mem_root) || - !(table->table_cache_key =memdup_root(&table->mem_root,(char*) key, - key_length))) + !(table->s->table_cache_key= memdup_root(&table->mem_root, (char*) key, + key_length))) { delete table->triggers; closefrm(table); @@ -833,9 +836,11 @@ TABLE *reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) DBUG_RETURN(0); } - table->key_length=key_length; - table->version=0; - table->flush_version=0; + share= table->s; + share->db= share->table_cache_key; + share->key_length=key_length; + share->version=0; + share->flush_version=0; table->in_use = thd; check_unused(); pthread_mutex_unlock(&LOCK_open); @@ -846,8 +851,8 @@ TABLE *reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) table->const_table=0; table->outer_join= table->null_row= table->maybe_null= table->force_index= 0; table->status=STATUS_NO_RECORD; - table->keys_in_use_for_query= table->keys_in_use; - table->used_keys= table->keys_for_keyread; + table->keys_in_use_for_query= share->keys_in_use; + table->used_keys= share->keys_for_keyread; DBUG_RETURN(table); } @@ -877,7 +882,7 @@ TABLE 
*open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, if (thd->killed) DBUG_RETURN(0); key_length= (uint) (strmov(strmov(key, table_list->db)+1, - table_list->real_name)-key)+1; + table_list->table_name)-key)+1; int4store(key + key_length, thd->server_id); int4store(key + key_length + 4, thd->variables.pseudo_thread_id); @@ -885,13 +890,13 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, { for (table= thd->temporary_tables; table ; table=table->next) { - if (table->key_length == key_length + TMP_TABLE_KEY_EXTRA && - !memcmp(table->table_cache_key, key, + if (table->s->key_length == key_length + TMP_TABLE_KEY_EXTRA && + !memcmp(table->s->table_cache_key, key, key_length + TMP_TABLE_KEY_EXTRA)) { if (table->query_id == thd->query_id) { - my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->table_name); + my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias); DBUG_RETURN(0); } table->query_id= thd->query_id; @@ -907,9 +912,9 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, { // Using table locks for (table=thd->open_tables; table ; table=table->next) { - if (table->key_length == key_length && - !memcmp(table->table_cache_key,key,key_length) && - !my_strcasecmp(system_charset_info, table->table_name, alias) && + if (table->s->key_length == key_length && + !memcmp(table->s->table_cache_key,key,key_length) && + !my_strcasecmp(system_charset_info, table->alias, alias) && table->query_id != thd->query_id) { table->query_id=thd->query_id; @@ -925,7 +930,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, { char path[FN_REFLEN]; strxnmov(path, FN_REFLEN, mysql_data_home, "/", table_list->db, "/", - table_list->real_name, reg_ext, NullS); + table_list->table_name, reg_ext, NullS); (void) unpack_filename(path, path); if (mysql_frm_type(path) == FRMTYPE_VIEW) { @@ -933,7 +938,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, table= &tab; 
VOID(pthread_mutex_lock(&LOCK_open)); if (open_unireg_entry(thd, table, table_list->db, - table_list->real_name, + table_list->table_name, alias, table_list, mem_root)) { table->next=table->prev=table; @@ -972,7 +977,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, table && table->in_use ; table = (TABLE*) hash_next(&open_cache,(byte*) key,key_length)) { - if (table->version != refresh_version) + if (table->s->version != refresh_version) { /* ** There is a refresh in progress for this table @@ -1004,6 +1009,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, } else { + TABLE_SHARE *share; /* Free cache if too big */ while (open_cache.records > table_cache_size && unused_tables) VOID(hash_delete(&open_cache,(byte*) unused_tables)); /* purecov: tested */ @@ -1014,11 +1020,12 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, VOID(pthread_mutex_unlock(&LOCK_open)); DBUG_RETURN(NULL); } - if (open_unireg_entry(thd, table, table_list->db, table_list->real_name, + if (open_unireg_entry(thd, table, table_list->db, table_list->table_name, alias, table_list, mem_root) || (!table_list->view && - !(table->table_cache_key= memdup_root(&table->mem_root, (char*) key, - key_length)))) + !(table->s->table_cache_key= memdup_root(&table->mem_root, + (char*) key, + key_length)))) { table->next=table->prev=table; free_cache_entry(table); @@ -1031,9 +1038,11 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, VOID(pthread_mutex_unlock(&LOCK_open)); DBUG_RETURN(0); // VIEW } - table->key_length=key_length; - table->version=refresh_version; - table->flush_version=flush_version; + share= table->s; + share->db= share->table_cache_key; + share->key_length= key_length; + share->version= refresh_version; + share->flush_version= flush_version; DBUG_PRINT("info", ("inserting table %p into the cache", table)); VOID(my_hash_insert(&open_cache,(byte*) table)); } @@ -1051,16 +1060,14 @@ TABLE 
*open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, reset: if (thd->lex->need_correct_ident()) table->alias_name_used= my_strcasecmp(table_alias_charset, - table->real_name, alias); + table->s->table_name, alias); /* Fix alias if table name changes */ - if (strcmp(table->table_name, alias)) + if (strcmp(table->alias, alias)) { uint length=(uint) strlen(alias)+1; - table->table_name= (char*) my_realloc(table->table_name,length, - MYF(MY_WME)); - memcpy(table->table_name,alias,length); - for (uint i=0 ; i < table->fields ; i++) - table->field[i]->table_name=table->table_name; + table->alias= (char*) my_realloc((char*) table->alias, length, + MYF(MY_WME)); + memcpy((char*) table->alias, alias, length); } /* These variables are also set in reopen_table() */ table->tablenr=thd->current_tablenr++; @@ -1068,9 +1075,9 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, table->const_table=0; table->outer_join= table->null_row= table->maybe_null= table->force_index= 0; table->status=STATUS_NO_RECORD; - table->keys_in_use_for_query= table->keys_in_use; + table->keys_in_use_for_query= table->s->keys_in_use; table->insert_values= 0; - table->used_keys= table->keys_for_keyread; + table->used_keys= table->s->keys_for_keyread; if (table->timestamp_field) table->timestamp_field_type= table->timestamp_field->get_auto_set_type(); table_list->updatable= 1; // It is not derived table nor non-updatable VIEW @@ -1086,8 +1093,8 @@ TABLE *find_locked_table(THD *thd, const char *db,const char *table_name) for (TABLE *table=thd->open_tables; table ; table=table->next) { - if (table->key_length == key_length && - !memcmp(table->table_cache_key,key,key_length)) + if (table->s->key_length == key_length && + !memcmp(table->s->table_cache_key,key,key_length)) return table; } return(0); @@ -1104,9 +1111,9 @@ TABLE *find_locked_table(THD *thd, const char *db,const char *table_name) bool reopen_table(TABLE *table,bool locked) { TABLE tmp; - char 
*db=table->table_cache_key; - char *table_name=table->real_name; - bool error=1; + char *db= table->s->table_cache_key; + const char *table_name= table->s->table_name; + bool error= 1; Field **field; uint key,part; DBUG_ENTER("reopen_table"); @@ -1114,24 +1121,25 @@ bool reopen_table(TABLE *table,bool locked) #ifdef EXTRA_DEBUG if (table->db_stat) sql_print_error("Table %s had a open data handler in reopen_table", - table->table_name); + table->alias); #endif if (!locked) VOID(pthread_mutex_lock(&LOCK_open)); safe_mutex_assert_owner(&LOCK_open); if (open_unireg_entry(table->in_use, &tmp, db, table_name, - table->table_name, 0, table->in_use->mem_root)) + table->alias, 0, table->in_use->mem_root)) goto end; free_io_cache(table); - if (!(tmp.table_cache_key= memdup_root(&tmp.mem_root,db, - table->key_length))) + if (!(tmp.s->table_cache_key= memdup_root(&tmp.mem_root,db, + table->s->key_length))) { delete tmp.triggers; closefrm(&tmp); // End of memory goto end; } + tmp.s->db= tmp.s->table_cache_key; /* This list copies variables set by open_table */ tmp.tablenr= table->tablenr; @@ -1141,16 +1149,16 @@ bool reopen_table(TABLE *table,bool locked) tmp.null_row= table->null_row; tmp.maybe_null= table->maybe_null; tmp.status= table->status; - tmp.keys_in_use_for_query= tmp.keys_in_use; - tmp.used_keys= tmp.keys_for_keyread; + tmp.keys_in_use_for_query= tmp.s->keys_in_use; + tmp.used_keys= tmp.s->keys_for_keyread; tmp.force_index= tmp.force_index; /* Get state */ - tmp.key_length= table->key_length; + tmp.s->key_length= table->s->key_length; tmp.in_use= table->in_use; tmp.reginfo.lock_type=table->reginfo.lock_type; - tmp.version= refresh_version; - tmp.tmp_table= table->tmp_table; + tmp.s->version= refresh_version; + tmp.s->tmp_table= table->s->tmp_table; tmp.grant= table->grant; /* Replace table in open list */ @@ -1161,16 +1169,17 @@ bool reopen_table(TABLE *table,bool locked) if (table->file) VOID(closefrm(table)); // close file, free everything - *table=tmp; + *table= 
tmp; + table->s= &table->share_not_to_be_used; table->file->change_table_ptr(table); - DBUG_ASSERT(table->table_name); + DBUG_ASSERT(table->alias); for (field=table->field ; *field ; field++) { (*field)->table= (*field)->orig_table= table; - (*field)->table_name=table->table_name; + (*field)->table_name= &table->alias; } - for (key=0 ; key < table->keys ; key++) + for (key=0 ; key < table->s->keys ; key++) for (part=0 ; part < table->key_info[key].usable_key_parts ; part++) table->key_info[key].key_part[part].field->table= table; VOID(pthread_cond_broadcast(&COND_refresh)); @@ -1194,8 +1203,8 @@ bool close_data_tables(THD *thd,const char *db, const char *table_name) TABLE *table; for (table=thd->open_tables; table ; table=table->next) { - if (!strcmp(table->real_name,table_name) && - !strcmp(table->table_cache_key,db)) + if (!strcmp(table->s->table_name, table_name) && + !strcmp(table->s->db, db)) { mysql_lock_remove(thd, thd->locked_tables,table); table->file->close(); @@ -1240,7 +1249,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) next=table->next; if (!tables || (!db_stat && reopen_table(table,1))) { - my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->table_name); + my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias); VOID(hash_delete(&open_cache,(byte*) table)); error=1; } @@ -1252,7 +1261,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) *tables_ptr++= table; // need new lock on this if (in_refresh) { - table->version=0; + table->s->version=0; table->locked_by_flush=0; } } @@ -1291,11 +1300,11 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks, bool found=send_refresh; for (; table ; table=table->next) { - if (table->version != refresh_version) + if (table->s->version != refresh_version) { found=1; if (!abort_locks) // If not from flush tables - table->version = refresh_version; // Let other threads use table + table->s->version= refresh_version; // Let other threads use table if (table->db_stat) { if 
(abort_locks) @@ -1325,8 +1334,8 @@ bool table_is_used(TABLE *table, bool wait_for_name_lock) { do { - char *key= table->table_cache_key; - uint key_length=table->key_length; + char *key= table->s->table_cache_key; + uint key_length= table->s->key_length; for (TABLE *search=(TABLE*) hash_search(&open_cache, (byte*) key,key_length) ; search ; @@ -1334,7 +1343,7 @@ bool table_is_used(TABLE *table, bool wait_for_name_lock) { if (search->locked_by_flush || search->locked_by_name && wait_for_name_lock || - search->db_stat && search->version < refresh_version) + search->db_stat && search->s->version < refresh_version) return 1; // Table is used } } while ((table=table->next)); @@ -1381,11 +1390,11 @@ bool drop_locked_tables(THD *thd,const char *db, const char *table_name) TABLE *table,*next,**prev; bool found=0; prev= &thd->open_tables; - for (table=thd->open_tables; table ; table=next) + for (table= thd->open_tables; table ; table=next) { next=table->next; - if (!strcmp(table->real_name,table_name) && - !strcmp(table->table_cache_key,db)) + if (!strcmp(table->s->table_name, table_name) && + !strcmp(table->s->db, db)) { mysql_lock_remove(thd, thd->locked_tables,table); VOID(hash_delete(&open_cache,(byte*) table)); @@ -1420,8 +1429,8 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name) TABLE *table; for (table= thd->open_tables; table ; table= table->next) { - if (!strcmp(table->real_name,table_name) && - !strcmp(table->table_cache_key,db)) + if (!strcmp(table->s->table_name,table_name) && + !strcmp(table->s->db, db)) { mysql_lock_abort(thd,table); break; @@ -1475,7 +1484,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, thd->open_options, entry, table_desc, mem_root)))) { - if (!entry->crashed) + if (!entry->s || !entry->s->crashed) { /* Frm file could not be found on disk @@ -1496,7 +1505,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, TABLE_LIST table_list; bzero((char*) &table_list, 
sizeof(table_list)); // just for safe table_list.db=(char*) db; - table_list.real_name=(char*) name; + table_list.table_name=(char*) name; safe_mutex_assert_owner(&LOCK_open); @@ -1663,9 +1672,9 @@ int open_tables(THD *thd, TABLE_LIST *start, uint *counter) for (TABLE_LIST *tmp= start; tmp; tmp= tmp->next_global) { /* Close normal (not temporary) changed tables */ - if (tmp->table && ! tmp->table->tmp_table) + if (tmp->table && ! tmp->table->s->tmp_table) { - if (tmp->table->version != refresh_version || + if (tmp->table->s->version != refresh_version || ! tmp->table->db_stat) { VOID(hash_delete(&open_cache,(byte*) tmp->table)); @@ -1724,7 +1733,7 @@ static bool check_lock_and_start_stmt(THD *thd, TABLE *table, if ((int) lock_type >= (int) TL_WRITE_ALLOW_READ && (int) table->reginfo.lock_type < (int) TL_WRITE_ALLOW_READ) { - my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0),table->table_name); + my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0),table->alias); DBUG_RETURN(1); } if ((error=table->file->start_stmt(thd))) @@ -1911,7 +1920,7 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count) { DBUG_ASSERT(thd->lock == 0); // You must lock everything at once TABLE **start,**ptr; - if (!(ptr=start=(TABLE**) sql_alloc(sizeof(TABLE*)*count))) + if (!(ptr=start=(TABLE**) thd->alloc(sizeof(TABLE*)*count))) return -1; for (table= tables; table; table= table->next_global) { @@ -1947,6 +1956,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, const char *table_name, bool link_in_list) { TABLE *tmp_table; + TABLE_SHARE *share; DBUG_ENTER("open_temporary_table"); /* @@ -1971,20 +1981,22 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, DBUG_RETURN(0); } + share= tmp_table->s; tmp_table->reginfo.lock_type=TL_WRITE; // Simulate locked - tmp_table->tmp_table = (tmp_table->file->has_transactions() ? 
- TRANSACTIONAL_TMP_TABLE : TMP_TABLE); - tmp_table->table_cache_key=(char*) (tmp_table+1); - tmp_table->key_length= (uint) (strmov((tmp_table->real_name= - strmov(tmp_table->table_cache_key,db) - +1), table_name) - - tmp_table->table_cache_key)+1; - int4store(tmp_table->table_cache_key + tmp_table->key_length, - thd->server_id); - tmp_table->key_length += 4; - int4store(tmp_table->table_cache_key + tmp_table->key_length, + share->tmp_table= (tmp_table->file->has_transactions() ? + TRANSACTIONAL_TMP_TABLE : TMP_TABLE); + share->table_cache_key= (char*) (tmp_table+1); + share->db= share->table_cache_key; + share->key_length= (uint) (strmov(((char*) (share->table_name= + strmov(share->table_cache_key, + db)+1)), + table_name) - + share->table_cache_key) +1; + int4store(share->table_cache_key + share->key_length, thd->server_id); + share->key_length+= 4; + int4store(share->table_cache_key + share->key_length, thd->variables.pseudo_thread_id); - tmp_table->key_length += 4; + share->key_length+= 4; if (link_in_list) { @@ -2089,7 +2101,12 @@ find_field_in_table(THD *thd, TABLE_LIST *table_list, DBUG_RETURN(WRONG_GRANT); #endif if (thd->lex->current_select->no_wrap_view_item) - *ref= trans[i].item; + { + if (register_tree_change) + thd->change_item_tree(ref, trans[i].item); + else + *ref= trans[i].item; + } else { Item_ref *item_ref= new Item_ref(&trans[i].item, @@ -2098,6 +2115,8 @@ find_field_in_table(THD *thd, TABLE_LIST *table_list, /* as far as Item_ref have defined reference it do not need tables */ if (register_tree_change && item_ref) thd->change_item_tree(ref, item_ref); + else if (item_ref) + *ref= item_ref; } DBUG_RETURN((Field*) view_ref_found); } @@ -2151,13 +2170,13 @@ Field *find_field_in_real_table(THD *thd, TABLE *table, uint cached_field_index= *cached_field_index_ptr; /* We assume here that table->field < NO_CACHED_FIELD_INDEX = UINT_MAX */ - if (cached_field_index < table->fields && + if (cached_field_index < table->s->fields && 
!my_strcasecmp(system_charset_info, table->field[cached_field_index]->field_name, name)) field_ptr= table->field + cached_field_index; - else if (table->name_hash.records) - field_ptr= (Field**)hash_search(&table->name_hash,(byte*) name, - length); + else if (table->s->name_hash.records) + field_ptr= (Field**) hash_search(&table->s->name_hash, (byte*) name, + length); else { if (!(field_ptr= table->field)) @@ -2193,8 +2212,8 @@ Field *find_field_in_real_table(THD *thd, TABLE *table, } #ifndef NO_EMBEDDED_ACCESS_CHECKS if (check_grants && check_grant_column(thd, &table->grant, - table->table_cache_key, - table->real_name, name, length)) + table->s->db, + table->s->table_name, name, length)) return WRONG_GRANT; #endif return field; @@ -2247,15 +2266,15 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, if (item->cached_table) { /* - This shortcut is used by prepared statements. We assuming that - TABLE_LIST *tables is not changed during query execution (which - is true for all queries except RENAME but luckily RENAME doesn't + This shortcut is used by prepared statements. We assuming that + TABLE_LIST *tables is not changed during query execution (which + is true for all queries except RENAME but luckily RENAME doesn't use fields...) so we can rely on reusing pointer to its member. With this optimization we also miss case when addition of one more - field makes some prepared query ambiguous and so erroneous, but we + field makes some prepared query ambiguous and so erroneous, but we accept this trade off. 
*/ - if (item->cached_table->table) + if (item->cached_table->table && !item->cached_table->view) { found= find_field_in_real_table(thd, item->cached_table->table, name, length, @@ -2268,7 +2287,7 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, else { TABLE_LIST *table= item->cached_table; - Field *find= find_field_in_table(thd, table, name, item->name, length, + found= find_field_in_table(thd, table, name, item->name, length, ref, (table->table && test(table->table->grant. @@ -2304,8 +2323,10 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, bool found_table=0; for (; tables; tables= tables->next_local) { + /* TODO; Ensure that db and tables->db always points to something ! */ if (!my_strcasecmp(table_alias_charset, tables->alias, table_name) && - (!db || !tables->db || !tables->db[0] || !strcmp(db,tables->db))) + (!db || !db[0] || !tables->db || !tables->db[0] || + !strcmp(db,tables->db))) { found_table=1; Field *find= find_field_in_table(thd, tables, name, item->name, @@ -2391,9 +2412,7 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, { if (field == WRONG_GRANT) return (Field*) 0; - item->cached_table= tables; - if (!tables->cacheable_table) - item->cached_table= 0; + item->cached_table= (!tables->cacheable_table || found) ? 
0 : tables; if (found) { if (!thd->where) // Returns first found @@ -2812,7 +2831,7 @@ bool setup_tables(THD *thd, TABLE_LIST *tables, Item **conds, tablenr= 0; } setup_table_map(table, table_list, tablenr); - table->used_keys= table->keys_for_keyread; + table->used_keys= table->s->keys_for_keyread; if (table_list->use_index) { key_map map; @@ -2876,12 +2895,13 @@ bool get_key_map_from_key_list(key_map *map, TABLE *table, map->clear_all(); while ((name=it++)) { - if (table->keynames.type_names == 0 || - (pos= find_type(&table->keynames, name->ptr(), name->length(), 1)) <= + if (table->s->keynames.type_names == 0 || + (pos= find_type(&table->s->keynames, name->ptr(), + name->length(), 1)) <= 0) { my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), - name->c_ptr(), table->real_name); + name->c_ptr(), table->s->table_name); map->set_all(); return 1; } @@ -2946,6 +2966,7 @@ insert_fields(THD *thd, TABLE_LIST *tables, const char *db_name, TABLE_LIST *last; TABLE_LIST *embedding; TABLE *table= tables->table; + bool alias_used= 0; if (!table_name || (!my_strcasecmp(table_alias_charset, table_name, tables->alias) && @@ -2972,7 +2993,8 @@ insert_fields(THD *thd, TABLE_LIST *tables, const char *db_name, DBUG_ASSERT(table != 0); table_iter.set(tables); if (check_grant_all_columns(thd, SELECT_ACL, &table->grant, - table->table_cache_key, table->real_name, + table->s->db, + table->s->table_name, &table_iter)) goto err; } @@ -3018,6 +3040,8 @@ insert_fields(THD *thd, TABLE_LIST *tables, const char *db_name, { iterator= &view_iter; view= 1; + alias_used= my_strcasecmp(table_alias_charset, + tables->table_name, tables->alias); } else { @@ -3039,15 +3063,19 @@ insert_fields(THD *thd, TABLE_LIST *tables, const char *db_name, ¬_used_field_index, TRUE)) { Item *item= iterator->item(thd); + if (view && !thd->lex->current_select->no_wrap_view_item) + { + /* + as far as we have view, then item point to view_iter, so we + can use it directly for this view specific operation + */ + item= new 
Item_ref(view_iter.item_ptr(), tables->view_name.str, + field_name); + } if (!found++) (void) it->replace(item); // Replace '*' else it->after(item); - if (view && !thd->lex->current_select->no_wrap_view_item) - { - item= new Item_ref(it->ref(), tables->view_name.str, - field_name); - } #ifndef NO_EMBEDDED_ACCESS_CHECKS if (any_privileges) { @@ -3066,7 +3094,7 @@ insert_fields(THD *thd, TABLE_LIST *tables, const char *db_name, else { db= tables->db; - tab= tables->real_name; + tab= tables->table_name; } if (!tables->schema_table && !(fld->have_privileges= (get_column_grant(thd, @@ -3101,9 +3129,15 @@ insert_fields(THD *thd, TABLE_LIST *tables, const char *db_name, else if (allocate_view_names && thd->lex->current_select->first_execution) { - Item_field *item= new Item_field(thd->strdup(tables->view_db.str), - thd->strdup(tables->view_name.str), - thd->strdup(field_name)); + Item_field *item; + if (alias_used) + item= new Item_field(0, + thd->strdup(tables->alias), + thd->strdup(field_name)); + else + item= new Item_field(thd->strdup(tables->view_db.str), + thd->strdup(tables->view_name.str), + thd->strdup(field_name)); /* during cleunup() this item will be put in list to replace expression from VIEW @@ -3117,7 +3151,7 @@ insert_fields(THD *thd, TABLE_LIST *tables, const char *db_name, fields marked in setup_tables during fix_fields of view columns */ if (table) - table->used_fields=table->fields; + table->used_fields= table->s->fields; } } if (found) @@ -3314,7 +3348,8 @@ int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, COND **conds) thd->restore_backup_item_arena(arena, &backup); if (embedded->on_expr && !embedded->on_expr->fixed) { - if (embedded->on_expr->fix_fields(thd, tables, &table->on_expr)) + if (embedded->on_expr->fix_fields(thd, tables, + &embedded->on_expr)) goto err_no_arena; } } @@ -3491,14 +3526,14 @@ void remove_db_from_cache(const my_string db) for (uint idx=0 ; idx < open_cache.records ; idx++) { TABLE *table=(TABLE*) 
hash_element(&open_cache,idx); - if (!strcmp(table->table_cache_key,db)) + if (!strcmp(table->s->db, db)) { - table->version=0L; /* Free when thread is ready */ + table->s->version= 0L; /* Free when thread is ready */ if (!table->in_use) relink_unused(table); } } - while (unused_tables && !unused_tables->version) + while (unused_tables && !unused_tables->s->version) VOID(hash_delete(&open_cache,(byte*) unused_tables)); } @@ -3543,7 +3578,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, table = (TABLE*) hash_next(&open_cache,(byte*) key,key_length)) { THD *in_use; - table->version=0L; /* Free when thread is ready */ + table->s->version= 0L; /* Free when thread is ready */ if (!(in_use=table->in_use)) { DBUG_PRINT("info",("Table was not in use")); @@ -3583,7 +3618,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, else result= result || return_if_owned_by_thd; } - while (unused_tables && !unused_tables->version) + while (unused_tables && !unused_tables->s->version) VOID(hash_delete(&open_cache,(byte*) unused_tables)); DBUG_RETURN(result); } diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index 26b1eff49e7..f4345f8ce28 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -1031,7 +1031,6 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) { TABLE_LIST table_list; TABLE *tmptable; - Query_cache_table *table = block_table->parent; /* @@ -1042,8 +1041,9 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) */ for (tmptable= thd->temporary_tables; tmptable ; tmptable= tmptable->next) { - if (tmptable->key_length - TMP_TABLE_KEY_EXTRA == table->key_length() && - !memcmp(tmptable->table_cache_key, table->data(), + if (tmptable->s->key_length - TMP_TABLE_KEY_EXTRA == + table->key_length() && + !memcmp(tmptable->s->table_cache_key, table->data(), table->key_length())) { DBUG_PRINT("qcache", @@ -1063,7 +1063,7 @@ Query_cache::send_result_to_client(THD *thd, 
char *sql, uint query_length) bzero((char*) &table_list,sizeof(table_list)); table_list.db = table->db(); - table_list.alias= table_list.real_name= table->table(); + table_list.alias= table_list.table_name= table->table(); #ifndef NO_EMBEDDED_ACCESS_CHECKS if (check_table_access(thd,SELECT_ACL,&table_list,1)) { @@ -2056,7 +2056,7 @@ void Query_cache::invalidate_table(TABLE_LIST *table_list) uint key_length; Query_cache_block *table_block; key_length=(uint) (strmov(strmov(key,table_list->db)+1, - table_list->real_name) -key)+ 1; + table_list->table_name) -key)+ 1; // We don't store temporary tables => no key_length+=4 ... if ((table_block = (Query_cache_block*) @@ -2067,7 +2067,7 @@ void Query_cache::invalidate_table(TABLE_LIST *table_list) void Query_cache::invalidate_table(TABLE *table) { - invalidate_table((byte*) table->table_cache_key, table->key_length); + invalidate_table((byte*) table->s->table_cache_key, table->s->key_length); } void Query_cache::invalidate_table(byte * key, uint32 key_length) @@ -2116,18 +2116,18 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, { DBUG_PRINT("qcache", ("table %s, db %s, openinfo at 0x%lx, keylen %u, key at 0x%lx", - tables_used->real_name, tables_used->db, + tables_used->table_name, tables_used->db, (ulong) tables_used->table, - tables_used->table->key_length, - (ulong) tables_used->table->table_cache_key)); - block_table->n=n; - if (!insert_table(tables_used->table->key_length, - tables_used->table->table_cache_key, block_table, + tables_used->table->s->key_length, + (ulong) tables_used->table->s->table_cache_key)); + block_table->n= n; + if (!insert_table(tables_used->table->s->key_length, + tables_used->table->s->table_cache_key, block_table, tables_used->db_length, tables_used->table->file->table_cache_type())) break; - if (tables_used->table->db_type == DB_TYPE_MRG_MYISAM) + if (tables_used->table->s->db_type == DB_TYPE_MRG_MYISAM) { ha_myisammrg *handler = (ha_myisammrg *) 
tables_used->table->file; MYRG_INFO *file = handler->myrg_info(); @@ -2663,15 +2663,15 @@ TABLE_COUNTER_TYPE Query_cache::is_cacheable(THD *thd, uint32 query_len, { table_count++; DBUG_PRINT("qcache", ("table %s, db %s, type %u", - tables_used->real_name, - tables_used->db, tables_used->table->db_type)); + tables_used->table_name, + tables_used->db, tables_used->table->s->db_type)); *tables_type|= tables_used->table->file->table_cache_type(); /* table_alias_charset used here because it depends of lower_case_table_names variable */ - if (tables_used->table->tmp_table != NO_TMP_TABLE || + if (tables_used->table->s->tmp_table != NO_TMP_TABLE || (*tables_type & HA_CACHE_TBL_NOCACHE) || (tables_used->db_length == 5 && my_strnncoll(table_alias_charset, (uchar*)tables_used->db, 6, @@ -2682,7 +2682,7 @@ TABLE_COUNTER_TYPE Query_cache::is_cacheable(THD *thd, uint32 query_len, other non-cacheable table(s)")); DBUG_RETURN(0); } - if (tables_used->table->db_type == DB_TYPE_MRG_MYISAM) + if (tables_used->table->s->db_type == DB_TYPE_MRG_MYISAM) { ha_myisammrg *handler = (ha_myisammrg *)tables_used->table->file; MYRG_INFO *file = handler->myrg_info(); @@ -2729,8 +2729,8 @@ my_bool Query_cache::ask_handler_allowance(THD *thd, for (; tables_used; tables_used= tables_used->next_global) { TABLE *table= tables_used->table; - if (!ha_caching_allowed(thd, table->table_cache_key, - table->key_length, + if (!ha_caching_allowed(thd, table->s->table_cache_key, + table->s->key_length, table->file->table_cache_type())) { DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", diff --git a/sql/sql_class.cc b/sql/sql_class.cc index d369e85d775..c9545a0141e 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -650,7 +650,7 @@ void THD::add_changed_table(TABLE *table) DBUG_ASSERT((options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) && table->file->has_transactions()); - add_changed_table(table->table_cache_key, table->key_length); + add_changed_table(table->s->table_cache_key, 
table->s->key_length); DBUG_VOID_RETURN; } diff --git a/sql/sql_class.h b/sql/sql_class.h index 020403827fa..1ba3e2cb54d 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -32,7 +32,7 @@ class sp_cache; enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE }; enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME }; -enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_IGNORE, DUP_UPDATE }; +enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE }; enum enum_log_type { LOG_CLOSED, LOG_TO_BE_OPENED, LOG_NORMAL, LOG_NEW, LOG_BIN}; enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON, DELAY_KEY_WRITE_ALL }; @@ -225,12 +225,12 @@ typedef struct st_copy_info { ha_rows error_count; enum enum_duplicates handle_duplicates; int escape_char, last_errno; -/* for INSERT ... UPDATE */ + bool ignore; + /* for INSERT ... UPDATE */ List *update_fields; List *update_values; -/* for VIEW ... WITH CHECK OPTION */ + /* for VIEW ... WITH CHECK OPTION */ TABLE_LIST *view; - bool ignore; } COPY_INFO; @@ -1376,8 +1376,7 @@ class select_insert :public select_result_interceptor { select_insert(TABLE_LIST *table_list_par, TABLE *table_par, List *fields_par, List *update_fields, List *update_values, - enum_duplicates duplic, - bool ignore_check_option_errors); + enum_duplicates duplic, bool ignore); ~select_insert(); int prepare(List &list, SELECT_LEX_UNIT *u); bool send_data(List &items); @@ -1402,8 +1401,8 @@ public: HA_CREATE_INFO *create_info_par, List &fields_par, List &keys_par, - List &select_fields,enum_duplicates duplic) - :select_insert (NULL, NULL, &select_fields, 0, 0, duplic, 0), create_table(table), + List &select_fields,enum_duplicates duplic, bool ignore) + :select_insert (NULL, NULL, &select_fields, 0, 0, duplic, ignore), create_table(table), extra_fields(&fields_par),keys(&keys_par), create_info(create_info_par), lock(0) {} @@ -1674,12 +1673,12 @@ class multi_update :public select_result_interceptor uint table_count; Copy_field 
*copy_field; enum enum_duplicates handle_duplicates; - bool do_update, trans_safe, transactional_tables, log_delayed; + bool do_update, trans_safe, transactional_tables, log_delayed, ignore; public: multi_update(THD *thd_arg, TABLE_LIST *ut, TABLE_LIST *leaves_list, List *fields, List *values, - enum_duplicates handle_duplicates); + enum_duplicates handle_duplicates, bool ignore); ~multi_update(); int prepare(List &list, SELECT_LEX_UNIT *u); bool send_data(List &items); diff --git a/sql/sql_db.cc b/sql/sql_db.cc index d3b30de0bcd..26929ebc432 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -812,8 +812,8 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, if (!table_list) goto err; table_list->db= (char*) (table_list+1); - strmov(table_list->real_name= strmov(table_list->db,db)+1, file->name); - table_list->alias= table_list->real_name; // If lower_case_table_names=2 + strmov(table_list->table_name= strmov(table_list->db,db)+1, file->name); + table_list->alias= table_list->table_name; // If lower_case_table_names=2 /* Link into list */ (*tot_list_next)= table_list; tot_list_next= &table_list->next_local; diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index f4c5b0f8b59..15fbfcf928b 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -68,8 +68,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, DBUG_RETURN(TRUE); } - if (thd->lex->duplicates == DUP_IGNORE) - select_lex->no_error= 1; + select_lex->no_error= thd->lex->ignore; /* Test if the user wants to delete all rows and deletion doesn't have @@ -233,7 +232,7 @@ cleanup: delete select; transactional_table= table->file->has_transactions(); - log_delayed= (transactional_table || table->tmp_table); + log_delayed= (transactional_table || table->s->tmp_table); /* We write to the binary log even if we deleted no row, because maybe the user is using this command to ensure that a table is clean on master *and @@ -308,7 +307,7 @@ bool mysql_prepare_delete(THD *thd, 
TABLE_LIST *table_list, Item **conds) } if (unique_table(table_list, table_list->next_global)) { - my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); + my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name); DBUG_RETURN(TRUE); } select_lex->fix_prepare_information(thd, conds); @@ -367,7 +366,7 @@ bool mysql_multi_delete_prepare(THD *thd) check_key_in_view(thd, target_tbl->correspondent_table)) { my_error(ER_NON_UPDATABLE_TABLE, MYF(0), - target_tbl->real_name, "DELETE"); + target_tbl->table_name, "DELETE"); DBUG_RETURN(TRUE); } /* @@ -382,10 +381,10 @@ bool mysql_multi_delete_prepare(THD *thd) { if (un->first_select()->linkage != DERIVED_TABLE_TYPE && un->check_updateable(target_tbl->correspondent_table->db, - target_tbl->correspondent_table->real_name)) + target_tbl->correspondent_table->table_name)) { my_error(ER_UPDATE_TABLE_USED, MYF(0), - target_tbl->correspondent_table->real_name); + target_tbl->correspondent_table->table_name); DBUG_RETURN(TRUE); } } @@ -446,7 +445,7 @@ multi_delete::initialize_tables(JOIN *join) tbl->used_keys.clear_all(); if (tbl->file->has_transactions()) log_delayed= transactional_tables= 1; - else if (tbl->tmp_table != NO_TMP_TABLE) + else if (tbl->s->tmp_table != NO_TMP_TABLE) log_delayed= 1; else normal_tables= 1; @@ -718,19 +717,19 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) bzero((char*) &create_info,sizeof(create_info)); /* If it is a temporary table, close and regenerate it */ if (!dont_send_ok && (table_ptr=find_temporary_table(thd,table_list->db, - table_list->real_name))) + table_list->table_name))) { TABLE *table= *table_ptr; table->file->info(HA_STATUS_AUTO | HA_STATUS_NO_LOCK); - db_type table_type=table->db_type; - strmov(path,table->path); + db_type table_type= table->s->db_type; + strmov(path, table->s->path); *table_ptr= table->next; // Unlink table from list close_temporary(table,0); *fn_ext(path)=0; // Remove the .frm extension ha_create_table(path, &create_info,1); 
// We don't need to call invalidate() because this table is not in cache if ((error= (int) !(open_temporary_table(thd, path, table_list->db, - table_list->real_name, 1)))) + table_list->table_name, 1)))) (void) rm_temporary_table(table_type, path); /* If we return here we will not have logged the truncation to the bin log @@ -740,7 +739,7 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) } (void) sprintf(path,"%s/%s/%s%s",mysql_data_home,table_list->db, - table_list->real_name,reg_ext); + table_list->table_name,reg_ext); fn_format(path,path,"","",4); if (!dont_send_ok) @@ -749,7 +748,7 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) if ((table_type=get_table_type(path)) == DB_TYPE_UNKNOWN) { my_error(ER_NO_SUCH_TABLE, MYF(0), - table_list->db, table_list->real_name); + table_list->db, table_list->table_name); DBUG_RETURN(TRUE); } if (!ha_supports_generate(table_type)) diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 520393f8544..bed65f90c00 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -147,7 +147,7 @@ exit: { thd->clear_error(); my_error(ER_VIEW_INVALID, MYF(0), orig_table_list->db, - orig_table_list->real_name); + orig_table_list->table_name); } } @@ -168,9 +168,9 @@ exit: delete derived_result; orig_table_list->derived_result= derived_result; orig_table_list->table= table; - orig_table_list->real_name= table->real_name; + orig_table_list->table_name= (char*) table->s->table_name; table->derived_select_number= first_select->select_number; - table->tmp_table= TMP_TABLE; + table->s->tmp_table= TMP_TABLE; #ifndef NO_EMBEDDED_ACCESS_CHECKS table->grant.privilege= SELECT_ACL; #endif diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 2ee7734e4f3..c9c21d82568 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -152,7 +152,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) int error; DBUG_ENTER("mysql_ha_open"); DBUG_PRINT("enter",("'%s'.'%s' as '%s' reopen: %d", 
- tables->db, tables->real_name, tables->alias, + tables->db, tables->table_name, tables->alias, (int) reopen)); if (! hash_inited(&thd->handler_tables_hash)) @@ -206,7 +206,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) { /* copy the TABLE_LIST struct */ dblen= strlen(tables->db) + 1; - namelen= strlen(tables->real_name) + 1; + namelen= strlen(tables->table_name) + 1; aliaslen= strlen(tables->alias) + 1; if (!(my_multi_malloc(MYF(MY_WME), &hash_tables, sizeof(*hash_tables), @@ -218,10 +218,10 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) /* structure copy */ *hash_tables= *tables; hash_tables->db= db; - hash_tables->real_name= name; + hash_tables->table_name= name; hash_tables->alias= alias; memcpy(hash_tables->db, tables->db, dblen); - memcpy(hash_tables->real_name, tables->real_name, namelen); + memcpy(hash_tables->table_name, tables->table_name, namelen); memcpy(hash_tables->alias, tables->alias, aliaslen); /* add to hash */ @@ -266,7 +266,7 @@ bool mysql_ha_close(THD *thd, TABLE_LIST *tables) TABLE **table_ptr; DBUG_ENTER("mysql_ha_close"); DBUG_PRINT("enter",("'%s'.'%s' as '%s'", - tables->db, tables->real_name, tables->alias)); + tables->db, tables->table_name, tables->alias)); if ((hash_tables= (TABLE_LIST*) hash_search(&thd->handler_tables_hash, (byte*) tables->alias, @@ -348,7 +348,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, uint key_len; DBUG_ENTER("mysql_ha_read"); DBUG_PRINT("enter",("'%s'.'%s' as '%s'", - tables->db, tables->real_name, tables->alias)); + tables->db, tables->table_name, tables->alias)); LINT_INIT(key); LINT_INIT(key_len); @@ -363,7 +363,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, { table= hash_tables->table; DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' tab %p", - hash_tables->db, hash_tables->real_name, + hash_tables->db, hash_tables->table_name, hash_tables->alias, table)); if (!table) { @@ -378,7 +378,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, table= 
hash_tables->table; DBUG_PRINT("info",("re-opened '%s'.'%s' as '%s' tab %p", - hash_tables->db, hash_tables->real_name, + hash_tables->db, hash_tables->table_name, hash_tables->alias, table)); } @@ -398,7 +398,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, #if MYSQL_VERSION_ID < 40100 char buff[MAX_DBKEY_LENGTH]; if (*tables->db) - strxnmov(buff, sizeof(buff), tables->db, ".", tables->real_name, NullS); + strxnmov(buff, sizeof(buff), tables->db, ".", tables->table_name, NullS); else strncpy(buff, tables->alias, sizeof(buff)); my_error(ER_UNKNOWN_TABLE, MYF(0), buff, "HANDLER"); @@ -417,7 +417,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, if (keyname) { - if ((keyno=find_type(keyname, &table->keynames, 1+2)-1)<0) + if ((keyno=find_type(keyname, &table->s->keynames, 1+2)-1)<0) { my_error(ER_KEY_DOES_NOT_EXITS, MYF(0), keyname, tables->alias); goto err0; @@ -527,7 +527,7 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables, if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) { sql_print_error("mysql_ha_read: Got error %d when reading table '%s'", - error, tables->real_name); + error, tables->table_name); table->file->print_error(error,MYF(0)); goto err; } @@ -609,22 +609,22 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags) for (tmp_tables= tables ; tmp_tables; tmp_tables= tmp_tables->next_local) { DBUG_PRINT("info-in-tables-list",("'%s'.'%s' as '%s'", - tmp_tables->db, tmp_tables->real_name, + tmp_tables->db, tmp_tables->table_name, tmp_tables->alias)); /* Close all currently open handler tables with the same base table. */ table_ptr= &(thd->handler_tables); while (*table_ptr) { - if ((! *tmp_tables->db || - ! my_strcasecmp(&my_charset_latin1, (*table_ptr)->table_cache_key, + if ((!*tmp_tables->db || + !my_strcasecmp(&my_charset_latin1, (*table_ptr)->s->db, tmp_tables->db)) && - ! my_strcasecmp(&my_charset_latin1, (*table_ptr)->real_name, - tmp_tables->real_name)) + ! 
my_strcasecmp(&my_charset_latin1, (*table_ptr)->s->table_name, + tmp_tables->table_name)) { DBUG_PRINT("info",("*table_ptr '%s'.'%s' as '%s'", - (*table_ptr)->table_cache_key, - (*table_ptr)->real_name, - (*table_ptr)->table_name)); + (*table_ptr)->s->db, + (*table_ptr)->s->table_name, + (*table_ptr)->alias)); mysql_ha_flush_table(thd, table_ptr, mode_flags); continue; } @@ -641,7 +641,7 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags) while (*table_ptr) { if ((mode_flags & MYSQL_HA_FLUSH_ALL) || - ((*table_ptr)->version != refresh_version)) + ((*table_ptr)->s->version != refresh_version)) { mysql_ha_flush_table(thd, table_ptr, mode_flags); continue; @@ -677,12 +677,12 @@ static int mysql_ha_flush_table(THD *thd, TABLE **table_ptr, uint mode_flags) TABLE *table= *table_ptr; DBUG_ENTER("mysql_ha_flush_table"); DBUG_PRINT("enter",("'%s'.'%s' as '%s' flags: 0x%02x", - table->table_cache_key, table->real_name, - table->table_name, mode_flags)); + table->s->db, table->s->table_name, + table->alias, mode_flags)); if ((hash_tables= (TABLE_LIST*) hash_search(&thd->handler_tables_hash, - (byte*) (*table_ptr)->table_name, - strlen((*table_ptr)->table_name) + 1))) + (byte*) (*table_ptr)->alias, + strlen((*table_ptr)->alias) + 1))) { if (! 
(mode_flags & MYSQL_HA_REOPEN_ON_USAGE)) { diff --git a/sql/sql_help.cc b/sql/sql_help.cc index 99273b42f2a..7bf28a439b6 100644 --- a/sql/sql_help.cc +++ b/sql/sql_help.cc @@ -272,9 +272,9 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, DBUG_ENTER("get_topics_for_keyword"); if ((iindex_topic= find_type((char*) primary_key_name, - &topics->keynames, 1+2)-1)<0 || + &topics->s->keynames, 1+2)-1)<0 || (iindex_relations= find_type((char*) primary_key_name, - &relations->keynames, 1+2)-1)<0) + &relations->s->keynames, 1+2)-1)<0) { my_message(ER_CORRUPT_HELP_DB, ER(ER_CORRUPT_HELP_DB), MYF(0)); DBUG_RETURN(-1); @@ -620,16 +620,16 @@ bool mysqld_help(THD *thd, const char *mask) TABLE_LIST *leaves= 0; TABLE_LIST tables[4]; bzero((gptr)tables,sizeof(tables)); - tables[0].alias= tables[0].real_name= (char*) "help_topic"; + tables[0].alias= tables[0].table_name= (char*) "help_topic"; tables[0].lock_type= TL_READ; tables[0].next_global= tables[0].next_local= &tables[1]; - tables[1].alias= tables[1].real_name= (char*) "help_category"; + tables[1].alias= tables[1].table_name= (char*) "help_category"; tables[1].lock_type= TL_READ; tables[1].next_global= tables[1].next_local= &tables[2]; - tables[2].alias= tables[2].real_name= (char*) "help_relation"; + tables[2].alias= tables[2].table_name= (char*) "help_relation"; tables[2].lock_type= TL_READ; tables[2].next_global= tables[2].next_local= &tables[3]; - tables[3].alias= tables[3].real_name= (char*) "help_keyword"; + tables[3].alias= tables[3].table_name= (char*) "help_keyword"; tables[3].lock_type= TL_READ; tables[0].db= tables[1].db= tables[2].db= tables[3].db= (char*) "mysql"; diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 701ffe60cb3..2b30b4e6981 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -25,7 +25,7 @@ static int check_null_fields(THD *thd,TABLE *entry); #ifndef EMBEDDED_LIBRARY static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list); -static int write_delayed(THD 
*thd,TABLE *table, enum_duplicates dup, +static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup, bool ignore, char *query, uint query_length, bool log_on); static void end_delayed_insert(THD *thd); extern "C" pthread_handler_decl(handle_delayed_insert,arg); @@ -71,7 +71,7 @@ check_insert_fields(THD *thd, TABLE_LIST *table_list, List &fields, table_list->view_db.str, table_list->view_name.str); return -1; } - if (values.elements != table->fields) + if (values.elements != table->s->fields) { my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter); return -1; @@ -82,7 +82,7 @@ check_insert_fields(THD *thd, TABLE_LIST *table_list, List &fields, Field_iterator_table fields; fields.set_table(table); if (check_grant_all_columns(thd, INSERT_ACL, &table->grant, - table->table_cache_key, table->real_name, + table->s->db, table->s->table_name, &fields)) return -1; } @@ -138,7 +138,7 @@ check_insert_fields(THD *thd, TABLE_LIST *table_list, List &fields, } // For the values we need select_priv #ifndef NO_EMBEDDED_ACCESS_CHECKS - table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege); + table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); #endif if (check_key_in_view(thd, table_list) || @@ -158,7 +158,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, List &values_list, List &update_fields, List &update_values, - enum_duplicates duplic) + enum_duplicates duplic, + bool ignore) { int error, res; /* @@ -168,7 +169,6 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, */ bool log_on= (thd->options & OPTION_BIN_LOG) || (!(thd->master_access & SUPER_ACL)); bool transactional_table, log_delayed; - bool ignore_err= (thd->lex->duplicates == DUP_IGNORE); uint value_count; ulong counter = 1; ulonglong id; @@ -208,10 +208,10 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, { if (find_locked_table(thd, table_list->db ? 
table_list->db : thd->db, - table_list->real_name)) + table_list->table_name)) { my_error(ER_DELAYED_INSERT_TABLE_LOCKED, MYF(0), - table_list->real_name); + table_list->table_name); DBUG_RETURN(TRUE); } } @@ -271,18 +271,19 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, */ info.records= info.deleted= info.copied= info.updated= 0; + info.ignore= ignore; info.handle_duplicates=duplic; - info.update_fields=&update_fields; - info.update_values=&update_values; + info.update_fields= &update_fields; + info.update_values= &update_values; info.view= (table_list->view ? table_list : 0); - info.ignore= ignore_err; + /* Count warnings for all inserts. For single line insert, generate an error if try to set a NOT NULL field to NULL. */ thd->count_cuted_fields= ((values_list.elements == 1 && - duplic != DUP_IGNORE) ? + !ignore) ? CHECK_FIELD_ERROR_FOR_NULL : CHECK_FIELD_WARN); thd->cuted_fields = 0L; @@ -305,7 +306,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, table->file->start_bulk_insert(values_list.elements); thd->no_trans_update= 0; - thd->abort_on_warning= (duplic != DUP_IGNORE && + thd->abort_on_warning= (!ignore && (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))); @@ -320,7 +321,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, { if (fields.elements || !value_count) { - restore_record(table,default_values); // Get empty record + restore_record(table,s->default_values); // Get empty record if (fill_record(thd, fields, *values, 0)) { if (values_list.elements != 1 && !thd->net.report_error) @@ -340,9 +341,9 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, else { if (thd->used_tables) // Column used in values() - restore_record(table,default_values); // Get empty record + restore_record(table,s->default_values); // Get empty record else - table->record[0][0]=table->default_values[0]; // Fix delete marker + table->record[0][0]= table->s->default_values[0]; // Fix delete marker if (fill_record(thd, table->field, 
*values, 0)) { if (values_list.elements != 1 && ! thd->net.report_error) @@ -366,7 +367,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, if ((res= table_list->view_check_option(thd, (values_list.elements == 1 ? 0 : - ignore_err))) == + ignore))) == VIEW_CHECK_SKIP) continue; else if (res == VIEW_CHECK_ERROR) @@ -377,7 +378,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, #ifndef EMBEDDED_LIBRARY if (lock_type == TL_WRITE_DELAYED) { - error=write_delayed(thd,table,duplic,query, thd->query_length, log_on); + error=write_delayed(thd, table, duplic, ignore, query, thd->query_length, log_on); query=0; } else @@ -441,7 +442,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, transactional_table= table->file->has_transactions(); - log_delayed= (transactional_table || table->tmp_table); + log_delayed= (transactional_table || table->s->tmp_table); if ((info.copied || info.deleted || info.updated) && (error <= 0 || !transactional_table)) { @@ -490,7 +491,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, else { char buff[160]; - if (duplic == DUP_IGNORE) + if (ignore) sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (lock_type == TL_WRITE_DELAYED) ? 
(ulong) 0 : (ulong) (info.records - info.copied), (ulong) thd->cuted_fields); @@ -702,7 +703,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, if (!select_insert && unique_table(table_list, table_list->next_global)) { - my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); + my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name); DBUG_RETURN(TRUE); } if (duplic == DUP_UPDATE || duplic == DUP_REPLACE) @@ -716,7 +717,7 @@ bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, static int last_uniq_key(TABLE *table,uint keynr) { - while (++keynr < table->keys) + while (++keynr < table->s->keys) if (table->key_info[keynr].flags & HA_NOSAME) return 0; return 1; @@ -758,7 +759,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) */ if (info->handle_duplicates == DUP_REPLACE && table->next_number_field && - key_nr == table->next_number_index && + key_nr == table->s->next_number_index && table->file->auto_increment_column_changed) goto err; if (table->file->table_flags() & HA_DUPP_POS) @@ -776,7 +777,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) if (!key) { - if (!(key=(char*) my_safe_alloca(table->max_unique_length, + if (!(key=(char*) my_safe_alloca(table->s->max_unique_length, MAX_KEY_LENGTH))) { error=ENOMEM; @@ -851,7 +852,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) } else if ((error=table->file->write_row(table->record[0]))) { - if (info->handle_duplicates != DUP_IGNORE || + if (!info->ignore || (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)) goto err; table->file->restore_auto_increment(); @@ -859,7 +860,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info) else info->copied++; if (key) - my_safe_afree(key,table->max_unique_length,MAX_KEY_LENGTH); + my_safe_afree(key,table->s->max_unique_length,MAX_KEY_LENGTH); if (!table->file->has_transactions()) thd->no_trans_update= 1; DBUG_RETURN(0); @@ -906,13 +907,13 @@ public: char 
*record,*query; enum_duplicates dup; time_t start_time; - bool query_start_used,last_insert_id_used,insert_id_used, log_query; + bool query_start_used,last_insert_id_used,insert_id_used, ignore, log_query; ulonglong last_insert_id; timestamp_auto_set_type timestamp_field_type; uint query_length; - delayed_row(enum_duplicates dup_arg, bool log_query_arg) - :record(0),query(0),dup(dup_arg),log_query(log_query_arg) {} + delayed_row(enum_duplicates dup_arg, bool ignore_arg, bool log_query_arg) + :record(0), query(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg) {} ~delayed_row() { x_free(record); @@ -1018,7 +1019,7 @@ delayed_insert *find_handler(THD *thd, TABLE_LIST *table_list) while ((tmp=it++)) { if (!strcmp(tmp->thd.db,table_list->db) && - !strcmp(table_list->real_name,tmp->table->real_name)) + !strcmp(table_list->table_name,tmp->table->s->table_name)) { tmp->lock(); break; @@ -1059,7 +1060,7 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) thread_count++; pthread_mutex_unlock(&LOCK_thread_count); if (!(tmp->thd.db=my_strdup(table_list->db,MYF(MY_WME))) || - !(tmp->thd.query=my_strdup(table_list->real_name,MYF(MY_WME)))) + !(tmp->thd.query=my_strdup(table_list->table_name,MYF(MY_WME)))) { delete tmp; thd->fatal_error(); @@ -1069,7 +1070,7 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) } tmp->table_list= *table_list; // Needed to open table tmp->table_list.db= tmp->thd.db; - tmp->table_list.alias= tmp->table_list.real_name=tmp->thd.query; + tmp->table_list.alias= tmp->table_list.table_name= tmp->thd.query; tmp->lock(); pthread_mutex_lock(&tmp->mutex); if ((error=pthread_create(&tmp->thd.real_id,&connection_attrib, @@ -1167,17 +1168,19 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) client_thd->proc_info="allocating local table"; copy= (TABLE*) client_thd->alloc(sizeof(*copy)+ - (table->fields+1)*sizeof(Field**)+ - table->reclength); + (table->s->fields+1)*sizeof(Field**)+ + table->s->reclength); if 
(!copy) goto error; *copy= *table; - bzero((char*) ©->name_hash,sizeof(copy->name_hash)); // No name hashing + copy->s= ©->share_not_to_be_used; + // No name hashing + bzero((char*) ©->s->name_hash,sizeof(copy->s->name_hash)); /* We don't need to change the file handler here */ field=copy->field=(Field**) (copy+1); - copy->record[0]=(byte*) (field+table->fields+1); - memcpy((char*) copy->record[0],(char*) table->record[0],table->reclength); + copy->record[0]=(byte*) (field+table->s->fields+1); + memcpy((char*) copy->record[0],(char*) table->record[0],table->s->reclength); /* Make a copy of all fields */ @@ -1200,7 +1203,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) { /* Restore offset as this may have been reset in handle_inserts */ copy->timestamp_field= - (Field_timestamp*) copy->field[table->timestamp_field_offset]; + (Field_timestamp*) copy->field[table->s->timestamp_field_offset]; copy->timestamp_field->unireg_check= table->timestamp_field->unireg_check; copy->timestamp_field_type= copy->timestamp_field->get_auto_set_type(); } @@ -1224,7 +1227,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) /* Put a question in queue */ -static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, +static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, bool ignore, char *query, uint query_length, bool log_on) { delayed_row *row=0; @@ -1237,18 +1240,18 @@ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, pthread_cond_wait(&di->cond_client,&di->mutex); thd->proc_info="storing row into queue"; - if (thd->killed || !(row= new delayed_row(duplic, log_on))) + if (thd->killed || !(row= new delayed_row(duplic, ignore, log_on))) goto err; if (!query) query_length=0; - if (!(row->record= (char*) my_malloc(table->reclength+query_length+1, + if (!(row->record= (char*) my_malloc(table->s->reclength+query_length+1, MYF(MY_WME)))) goto err; - memcpy(row->record,table->record[0],table->reclength); + memcpy(row->record, 
table->record[0], table->s->reclength); if (query_length) { - row->query=row->record+table->reclength; + row->query= row->record+table->s->reclength; memcpy(row->query,query,query_length+1); } row->query_length= query_length; @@ -1262,7 +1265,7 @@ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, di->rows.push_back(row); di->stacked_inserts++; di->status=1; - if (table->blob_fields) + if (table->s->blob_fields) unlink_blobs(table); pthread_cond_signal(&di->cond); @@ -1379,7 +1382,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) if (!(di->table->file->table_flags() & HA_CAN_INSERT_DELAYED)) { thd->fatal_error(); - my_error(ER_ILLEGAL_HA, MYF(0), di->table_list.real_name); + my_error(ER_ILLEGAL_HA, MYF(0), di->table_list.table_name); goto end; } di->table->copy_blobs=1; @@ -1567,13 +1570,13 @@ bool delayed_insert::handle_inserts(void) if (thr_upgrade_write_delay_lock(*thd.lock->locks)) { /* This can only happen if thread is killed by shutdown */ - sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->real_name); + sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->s->table_name); goto err; } thd.proc_info="insert"; max_rows=delayed_insert_limit; - if (thd.killed || table->version != refresh_version) + if (thd.killed || table->s->version != refresh_version) { thd.killed= THD::KILL_CONNECTION; max_rows= ~0; // Do as much as possible @@ -1591,7 +1594,7 @@ bool delayed_insert::handle_inserts(void) { stacked_inserts--; pthread_mutex_unlock(&mutex); - memcpy(table->record[0],row->record,table->reclength); + memcpy(table->record[0],row->record,table->s->reclength); thd.start_time=row->start_time; thd.query_start_used=row->query_start_used; @@ -1600,8 +1603,9 @@ bool delayed_insert::handle_inserts(void) thd.insert_id_used=row->insert_id_used; table->timestamp_field_type= row->timestamp_field_type; + info.ignore= row->ignore; info.handle_duplicates= row->dup; - if (info.handle_duplicates == DUP_IGNORE || + if (info.ignore || 
info.handle_duplicates == DUP_REPLACE) { table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); @@ -1624,7 +1628,7 @@ bool delayed_insert::handle_inserts(void) Query_log_event qinfo(&thd, row->query, row->query_length, 0, FALSE); mysql_bin_log.write(&qinfo); } - if (table->blob_fields) + if (table->s->blob_fields) free_delayed_insert_blobs(table); thread_safe_sub(delayed_rows_in_use,1,&LOCK_delayed_status); thread_safe_increment(delayed_insert_writes,&LOCK_delayed_status); @@ -1659,7 +1663,7 @@ bool delayed_insert::handle_inserts(void) if (thr_reschedule_write_lock(*thd.lock->locks)) { /* This should never happen */ - sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->real_name); + sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->s->table_name); } if (!using_bin_log) table->file->extra(HA_EXTRA_WRITE_CACHE); @@ -1718,8 +1722,6 @@ bool delayed_insert::handle_inserts(void) bool mysql_insert_select_prepare(THD *thd) { LEX *lex= thd->lex; - TABLE_LIST *first_select_table= - (TABLE_LIST*) lex->select_lex.table_list.first; TABLE_LIST *first_select_leaf_table; int res; DBUG_ENTER("mysql_insert_select_prepare"); @@ -1756,7 +1758,8 @@ bool mysql_insert_select_prepare(THD *thd) select_insert::select_insert(TABLE_LIST *table_list_par, TABLE *table_par, List *fields_par, - List *update_fields, List *update_values, + List *update_fields, + List *update_values, enum_duplicates duplic, bool ignore_check_option_errors) :table_list(table_list_par), table(table_par), fields(fields_par), @@ -1800,15 +1803,14 @@ select_insert::prepare(List &values, SELECT_LEX_UNIT *u) thd->lex->current_select->join->select_options|= OPTION_BUFFER_RESULT; } - restore_record(table,default_values); // Get empty record + restore_record(table,s->default_values); // Get empty record table->next_number_field=table->found_next_number_field; thd->cuted_fields=0; - if (info.handle_duplicates == DUP_IGNORE || - info.handle_duplicates == DUP_REPLACE) + if (info.ignore || info.handle_duplicates == DUP_REPLACE) 
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); table->file->start_bulk_insert((ha_rows) 0); thd->no_trans_update= 0; - thd->abort_on_warning= (info.handle_duplicates != DUP_IGNORE && + thd->abort_on_warning= (!info.ignore && (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))); @@ -1854,9 +1856,7 @@ bool select_insert::send_data(List &values) DBUG_RETURN(1); if (table_list) // Not CREATE ... SELECT { - switch (table_list->view_check_option(thd, - thd->lex->duplicates == - DUP_IGNORE)) { + switch (table_list->view_check_option(thd, info.ignore)) { case VIEW_CHECK_SKIP: DBUG_RETURN(0); case VIEW_CHECK_ERROR: @@ -1914,7 +1914,7 @@ void select_insert::send_error(uint errcode,const char *err) table->file->has_transactions(), FALSE); mysql_bin_log.write(&qinfo); } - if (!table->tmp_table) + if (!table->s->tmp_table) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } if (info.copied || info.deleted || info.updated) @@ -1942,7 +1942,7 @@ bool select_insert::send_eof() if (info.copied || info.deleted || info.updated) { query_cache_invalidate3(thd, table, 1); - if (!(table->file->has_transactions() || table->tmp_table)) + if (!(table->file->has_transactions() || table->s->tmp_table)) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } @@ -1965,7 +1965,7 @@ bool select_insert::send_eof() DBUG_RETURN(1); } char buff[160]; - if (info.handle_duplicates == DUP_IGNORE) + if (info.ignore) sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (ulong) (info.records - info.copied), (ulong) thd->cuted_fields); else @@ -1992,28 +1992,27 @@ select_create::prepare(List &values, SELECT_LEX_UNIT *u) if (!table) DBUG_RETURN(-1); // abort() deletes table - if (table->fields < values.elements) + if (table->s->fields < values.elements) { my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), 1); DBUG_RETURN(-1); } /* First field to copy */ - field=table->field+table->fields - values.elements; + field=table->field+table->s->fields - values.elements; /* Don't set timestamp if used */ 
table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; table->next_number_field=table->found_next_number_field; - restore_record(table,default_values); // Get empty record + restore_record(table,s->default_values); // Get empty record thd->cuted_fields=0; - if (info.handle_duplicates == DUP_IGNORE || - info.handle_duplicates == DUP_REPLACE) + if (info.ignore || info.handle_duplicates == DUP_REPLACE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); table->file->start_bulk_insert((ha_rows) 0); thd->no_trans_update= 0; - thd->abort_on_warning= (info.handle_duplicates != DUP_IGNORE && + thd->abort_on_warning= (!info.ignore && (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))); @@ -2054,9 +2053,9 @@ bool select_create::send_eof() Check if we can remove the following two rows. We should be able to just keep the table in the table cache. */ - if (!table->tmp_table) + if (!table->s->tmp_table) { - ulong version= table->version; + ulong version= table->s->version; hash_delete(&open_cache,(byte*) table); /* Tell threads waiting for refresh that something has happened */ if (version != refresh_version) @@ -2080,19 +2079,19 @@ void select_create::abort() if (table) { table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); - enum db_type table_type=table->db_type; - if (!table->tmp_table) + enum db_type table_type=table->s->db_type; + if (!table->s->tmp_table) { - ulong version= table->version; + ulong version= table->s->version; hash_delete(&open_cache,(byte*) table); if (!create_info->table_existed) - quick_rm_table(table_type, create_table->db, create_table->real_name); + quick_rm_table(table_type, create_table->db, create_table->table_name); /* Tell threads waiting for refresh that something has happened */ if (version != refresh_version) VOID(pthread_cond_broadcast(&COND_refresh)); } else if (!create_info->table_existed) - close_temporary_table(thd, create_table->db, create_table->real_name); + close_temporary_table(thd, create_table->db, 
create_table->table_name); table=0; } VOID(pthread_mutex_unlock(&LOCK_open)); diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 1081246c9e3..5929ad5c14b 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -166,6 +166,7 @@ void lex_start(THD *thd, uchar *buf,uint length) lex->ignore_space=test(thd->variables.sql_mode & MODE_IGNORE_SPACE); lex->sql_command=SQLCOM_END; lex->duplicates= DUP_ERROR; + lex->ignore= 0; lex->sphead= NULL; lex->spcont= NULL; lex->proc_list.first= 0; diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 007a4601338..6ed5fb247dc 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -721,7 +721,7 @@ typedef struct st_lex /* special JOIN::prepare mode: changing of query is prohibited */ bool view_prepare_mode; bool safe_to_cache_query; - bool subqueries; + bool subqueries, ignore; bool variables_used; ALTER_INFO alter_info; /* Prepared statements SQL syntax:*/ diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 21dd2318504..7858632fff2 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -82,8 +82,8 @@ static int read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, List &fields, enum enum_duplicates handle_duplicates, - bool read_file_from_client,thr_lock_type lock_type, - bool ignore_check_option_errors) + bool ignore, + bool read_file_from_client,thr_lock_type lock_type) { char name[FN_REFLEN]; File file; @@ -133,7 +133,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, } table= table_list->table; transactional_table= table->file->has_transactions(); - log_delayed= (transactional_table || table->tmp_table); + log_delayed= (transactional_table || table->s->tmp_table); if (!fields.elements) { @@ -186,7 +186,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, /* We can't give an error in the middle when using LOCAL files */ if (read_file_from_client && handle_duplicates == DUP_ERROR) - handle_duplicates=DUP_IGNORE; + ignore= 1; #ifndef 
EMBEDDED_LIBRARY if (read_file_from_client) @@ -237,6 +237,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, COPY_INFO info; bzero((char*) &info,sizeof(info)); + info.ignore= ignore; info.handle_duplicates=handle_duplicates; info.escape_char=escaped->length() ? (*escaped)[0] : INT_MAX; @@ -256,8 +257,9 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, lf_info.thd = thd; lf_info.ex = ex; lf_info.db = db; - lf_info.table_name = table_list->real_name; + lf_info.table_name = table_list->table_name; lf_info.fields = &fields; + lf_info.ignore= ignore; lf_info.handle_dup = handle_duplicates; lf_info.wrote_create_file = 0; lf_info.last_pos_in_file = HA_POS_ERROR; @@ -266,7 +268,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, } #endif /*!EMBEDDED_LIBRARY*/ - restore_record(table,default_values); + restore_record(table, s->default_values); thd->count_cuted_fields= CHECK_FIELD_WARN; /* calc cuted fields */ thd->cuted_fields=0L; @@ -288,7 +290,7 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; table->next_number_field=table->found_next_number_field; - if (handle_duplicates == DUP_IGNORE || + if (ignore || handle_duplicates == DUP_REPLACE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); ha_enable_transaction(thd, FALSE); @@ -296,18 +298,18 @@ bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, table->copy_blobs=1; thd->no_trans_update= 0; - thd->abort_on_warning= (handle_duplicates != DUP_IGNORE && + thd->abort_on_warning= (!ignore && (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))); if (!field_term->length() && !enclosed->length()) error= read_fixed_length(thd, info, table_list, fields,read_info, - skip_lines, ignore_check_option_errors); + skip_lines, ignore); else error= read_sep_field(thd, info, table_list, fields, read_info, *enclosed, skip_lines, - ignore_check_option_errors); + ignore); if 
(table->file->end_bulk_insert()) error=1; /* purecov: inspected */ ha_enable_transaction(thd, TRUE); @@ -485,9 +487,8 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count); } - switch(table_list->view_check_option(thd, - ignore_check_option_errors)) - { + switch (table_list->view_check_option(thd, + ignore_check_option_errors)) { case VIEW_CHECK_SKIP: read_info.next_line(); goto continue_loop; @@ -607,9 +608,8 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, } } - switch(table_list->view_check_option(thd, - ignore_check_option_errors)) - { + switch (table_list->view_check_option(thd, + ignore_check_option_errors)) { case VIEW_CHECK_SKIP: read_info.next_line(); goto continue_loop; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 9ebeb9fe06d..4b484500527 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -52,6 +52,7 @@ #define SP_COM_STRING(LP) \ ((LP)->sql_command == SQLCOM_CREATE_SPFUNCTION || \ (LP)->sql_command == SQLCOM_ALTER_FUNCTION || \ + (LP)->sql_command == SQLCOM_SHOW_CREATE_FUNC || \ (LP)->sql_command == SQLCOM_DROP_FUNCTION ? 
\ "FUNCTION" : "PROCEDURE") @@ -1228,7 +1229,7 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) if (!(table_list = (TABLE_LIST*) thd->calloc(sizeof(TABLE_LIST)))) DBUG_RETURN(1); // out of memory table_list->db= db; - table_list->real_name= table_list->alias= tbl_name; + table_list->table_name= table_list->alias= tbl_name; table_list->lock_type= TL_READ_NO_INSERT; table_list->prev_global= &table_list; // can be removed after merge with 4.1 @@ -1239,7 +1240,7 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) } if (lower_case_table_names) my_casedn_str(files_charset_info, tbl_name); - remove_escape(table_list->real_name); + remove_escape(table_list->table_name); if (!(table=open_ltable(thd, table_list, TL_READ_NO_INSERT))) DBUG_RETURN(1); @@ -1601,7 +1602,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, pend= strend(packet); thd->convert_string(&conv_name, system_charset_info, packet, (uint) (pend-packet), thd->charset()); - table_list.alias= table_list.real_name= conv_name.str; + table_list.alias= table_list.table_name= conv_name.str; packet= pend+1; if (!my_strcasecmp(system_charset_info, table_list.db, @@ -1615,10 +1616,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd, /* command not cachable => no gap for data base name */ if (!(thd->query=fields=thd->memdup(packet,thd->query_length+1))) break; - mysql_log.write(thd,command,"%s %s",table_list.real_name,fields); + mysql_log.write(thd,command,"%s %s",table_list.table_name, fields); if (lower_case_table_names) - my_casedn_str(files_charset_info, table_list.real_name); - remove_escape(table_list.real_name); // This can't have wildcards + my_casedn_str(files_charset_info, table_list.table_name); + remove_escape(table_list.table_name); // This can't have wildcards if (check_access(thd,SELECT_ACL,table_list.db,&table_list.grant.privilege, 0, 0)) @@ -1975,7 +1976,7 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, 
TABLE_LIST *table_list= (TABLE_LIST*) sel->table_list.first; char *db= table_list->db; remove_escape(db); // Fix escaped '_' - remove_escape(table_list->real_name); + remove_escape(table_list->table_name); if (check_access(thd,SELECT_ACL | EXTRA_ACL,db, &table_list->grant.privilege, 0, 0)) DBUG_RETURN(1); /* purecov: inspected */ @@ -2534,9 +2535,9 @@ mysql_execute_command(THD *thd) if (check_grant(thd, CREATE_ACL, all_tables, 0, 1, 0)) goto error; } - if (strlen(first_table->real_name) > NAME_LEN) + if (strlen(first_table->table_name) > NAME_LEN) { - my_error(ER_WRONG_TABLE_NAME, MYF(0), first_table->real_name); + my_error(ER_WRONG_TABLE_NAME, MYF(0), first_table->table_name); break; } pthread_mutex_lock(&LOCK_active_mi); @@ -2544,7 +2545,7 @@ mysql_execute_command(THD *thd) fetch_master_table will send the error to the client on failure. Give error if the table already exists. */ - if (!fetch_master_table(thd, first_table->db, first_table->real_name, + if (!fetch_master_table(thd, first_table->db, first_table->table_name, active_mi, 0, 0)) { send_ok(thd); @@ -2563,17 +2564,17 @@ mysql_execute_command(THD *thd) TABLE_LIST *select_tables= lex->query_tables; if ((res= create_table_precheck(thd, select_tables, create_table))) - goto create_error; + goto unsent_create_error; #ifndef HAVE_READLINK lex->create_info.data_file_name=lex->create_info.index_file_name=0; #else /* Fix names if symlinked tables */ if (append_file_to_dir(thd, &lex->create_info.data_file_name, - create_table->real_name) || + create_table->table_name) || append_file_to_dir(thd, &lex->create_info.index_file_name, - create_table->real_name)) - goto create_error; + create_table->table_name)) + goto unsent_create_error; #endif /* If we are using SET CHARSET without DEFAULT, add an implicit @@ -2604,8 +2605,8 @@ mysql_execute_command(THD *thd) if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) && unique_table(create_table, select_tables)) { - my_error(ER_UPDATE_TABLE_USED, MYF(0), 
create_table->real_name); - goto create_error; + my_error(ER_UPDATE_TABLE_USED, MYF(0), create_table->table_name); + goto unsent_create_error; } /* If we create merge table, we have to test tables in merge, too */ if (lex->create_info.used_fields & HA_CREATE_USED_UNION) @@ -2617,8 +2618,8 @@ mysql_execute_command(THD *thd) { if (unique_table(tab, select_tables)) { - my_error(ER_UPDATE_TABLE_USED, MYF(0), tab->real_name); - goto create_error; + my_error(ER_UPDATE_TABLE_USED, MYF(0), tab->table_name); + goto unsent_create_error; } } } @@ -2628,7 +2629,8 @@ mysql_execute_command(THD *thd) lex->create_list, lex->key_list, select_lex->item_list, - lex->duplicates))) + lex->duplicates, + lex->ignore))) { /* CREATE from SELECT give its SELECT_LEX for SELECT, @@ -2653,7 +2655,7 @@ mysql_execute_command(THD *thd) else { res= mysql_create_table(thd, create_table->db, - create_table->real_name, &lex->create_info, + create_table->table_name, &lex->create_info, lex->create_list, lex->key_list, 0, 0); } @@ -2663,8 +2665,8 @@ mysql_execute_command(THD *thd) lex->link_first_table_back(create_table, link_to_local); break; -create_error: /* put tables back for PS rexecuting */ +unsent_create_error: lex->link_first_table_back(create_table, link_to_local); goto error; } @@ -2746,7 +2748,7 @@ create_error: { // Rename of table TABLE_LIST tmp_table; bzero((char*) &tmp_table,sizeof(tmp_table)); - tmp_table.real_name=lex->name; + tmp_table.table_name=lex->name; tmp_table.db=select_lex->db; tmp_table.grant.privilege=priv; if (check_grant(thd, INSERT_ACL | CREATE_ACL, &tmp_table, 0, @@ -2768,7 +2770,7 @@ create_error: lex->key_list, select_lex->order_list.elements, (ORDER *) select_lex->order_list.first, - lex->duplicates, &lex->alter_info); + lex->duplicates, lex->ignore, &lex->alter_info); } break; } @@ -2933,7 +2935,7 @@ create_error: select_lex->order_list.elements, (ORDER *) select_lex->order_list.first, select_lex->select_limit, - lex->duplicates)); + lex->duplicates, lex->ignore)); 
/* mysql_update return 2 if we need to switch to multi-update */ if (result != 2) break; @@ -2954,7 +2956,7 @@ create_error: &lex->value_list, select_lex->where, select_lex->options, - lex->duplicates, unit, select_lex); + lex->duplicates, lex->ignore, unit, select_lex); break; } case SQLCOM_REPLACE: @@ -2965,8 +2967,7 @@ create_error: break; res= mysql_insert(thd, all_tables, lex->field_list, lex->many_values, lex->update_list, lex->value_list, - (lex->value_list.elements ? - DUP_UPDATE : lex->duplicates)); + lex->duplicates, lex->ignore); if (first_table->view && !first_table->contain_auto_increment) thd->last_insert_id= 0; // do not show last insert ID if VIEW have not it break; @@ -2997,8 +2998,7 @@ create_error: if (!res && (result= new select_insert(first_table, first_table->table, &lex->field_list, &lex->update_list, &lex->value_list, - lex->duplicates, - lex->duplicates == DUP_IGNORE))) + lex->duplicates, lex->ignore))) { /* insert/replace from SELECT give its SELECT_LEX for SELECT, @@ -3194,8 +3194,8 @@ create_error: goto error; } res= mysql_load(thd, lex->exchange, first_table, lex->field_list, - lex->duplicates, (bool) lex->local_file, - lex->lock_option, lex->duplicates == DUP_IGNORE); + lex->duplicates, lex->ignore, (bool) lex->local_file, + lex->lock_option); break; } @@ -4331,8 +4331,8 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, TABLE_LIST *org_tables=tables; for (; tables; tables= tables->next_global) { - if (tables->derived || tables->schema_table || - (tables->table && (int)tables->table->tmp_table) || + if (tables->derived || tables->schema_table || tables->belong_to_view || + (tables->table && (int)tables->table->s->tmp_table) || my_tz_check_n_skip_implicit_tables(&tables, thd->lex->time_zone_tables_used)) continue; @@ -4371,7 +4371,7 @@ check_procedure_access(THD *thd, ulong want_access,char *db, char *name, bzero((char *)tables, sizeof(TABLE_LIST)); tables->db= db; - tables->real_name= tables->alias= name; + 
tables->table_name= tables->alias= name; if ((thd->master_access & want_access) == want_access && !thd->db) tables->grant.privilege= want_access; @@ -5320,8 +5320,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, ptr->alias= alias_str; if (lower_case_table_names && table->table.length) my_casedn_str(files_charset_info, table->table.str); - ptr->real_name=table->table.str; - ptr->real_name_length=table->table.length; + ptr->table_name=table->table.str; + ptr->table_name_length=table->table.length; ptr->lock_type= lock_type; ptr->updating= test(table_options & TL_OPTION_UPDATING); ptr->force_index= test(table_options & TL_OPTION_FORCE_INDEX); @@ -5330,13 +5330,13 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, if (!my_strcasecmp(system_charset_info, ptr->db, information_schema_name.str)) { - ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, ptr->real_name); + ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, ptr->table_name); if (!schema_table || (schema_table->hidden && lex->orig_sql_command == SQLCOM_END)) // not a 'show' command { my_error(ER_UNKNOWN_TABLE, MYF(0), - ptr->real_name, information_schema_name.str); + ptr->table_name, information_schema_name.str); DBUG_RETURN(0); } ptr->schema_table= schema_table; @@ -6040,10 +6040,10 @@ bool mysql_create_index(THD *thd, TABLE_LIST *table_list, List &keys) bzero((char*) &create_info,sizeof(create_info)); create_info.db_type=DB_TYPE_DEFAULT; create_info.default_table_charset= thd->variables.collation_database; - DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->real_name, + DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name, &create_info, table_list, fields, keys, 0, (ORDER*)0, - DUP_ERROR, &alter_info)); + DUP_ERROR, 0, &alter_info)); } @@ -6058,10 +6058,10 @@ bool mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info) create_info.default_table_charset= thd->variables.collation_database; alter_info->clear(); alter_info->flags= 
ALTER_DROP_INDEX; - DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->real_name, + DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->table_name, &create_info, table_list, fields, keys, 0, (ORDER*)0, - DUP_ERROR, alter_info)); + DUP_ERROR, 0, alter_info)); } @@ -6194,7 +6194,7 @@ bool multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint *table_count) if (!walk) { my_error(ER_UNKNOWN_TABLE, MYF(0), - target_tbl->real_name, "MULTI DELETE"); + target_tbl->table_name, "MULTI DELETE"); DBUG_RETURN(TRUE); } walk->lock_type= target_tbl->lock_type; @@ -6344,7 +6344,7 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables, */ if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) && find_table_in_global_list(tables, create_table->db, - create_table->real_name)) + create_table->table_name)) { error= FALSE; goto err; diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 4b6f1bd79fe..6364d5ae039 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -150,13 +150,18 @@ find_prepared_statement(THD *thd, ulong id, const char *where) static bool send_prep_stmt(Prepared_statement *stmt, uint columns) { NET *net= &stmt->thd->net; - char buff[9]; + char buff[12]; + uint tmp; DBUG_ENTER("send_prep_stmt"); buff[0]= 0; /* OK packet indicator */ int4store(buff+1, stmt->id); int2store(buff+5, columns); int2store(buff+7, stmt->param_count); + buff[9]= 0; // Guard against a 4.1 client + tmp= min(stmt->thd->total_warn_count, 65535); + int2store(buff+10, tmp); + /* Send types and names of placeholders to the client XXX: fix this nasty upcast from List to List @@ -901,6 +906,7 @@ static bool mysql_test_insert(Prepared_statement *stmt, DBUG_RETURN(TRUE); } + if ((values= its++)) { uint value_count; @@ -922,6 +928,16 @@ static bool mysql_test_insert(Prepared_statement *stmt, value_count= values->elements; its.rewind(); + res= TRUE; + + if (table_list->lock_type == TL_WRITE_DELAYED && + !(table_list->table->file->table_flags() & HA_CAN_INSERT_DELAYED)) + 
{ + my_error(ER_ILLEGAL_HA, MYF(0), (table_list->view ? + table_list->view_name.str : + table_list->table_name)); + goto error; + } while ((values= its++)) { counter++; @@ -935,7 +951,7 @@ static bool mysql_test_insert(Prepared_statement *stmt, } } - res= 0; + res= FALSE; error: lex->unit.cleanup(); /* insert_values is cleared in open_table */ @@ -963,6 +979,9 @@ static int mysql_test_update(Prepared_statement *stmt, THD *thd= stmt->thd; uint table_count= 0; SELECT_LEX *select= &stmt->lex->select_lex; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint want_privilege; +#endif DBUG_ENTER("mysql_test_update"); if (update_precheck(thd, table_list)) @@ -985,17 +1004,29 @@ static int mysql_test_update(Prepared_statement *stmt, return 2; } + /* + thd->fill_derived_tables() is false here for sure (because it is + preparation of PS, so we even do not check it + */ if (lock_tables(thd, table_list, table_count) || - mysql_handle_derived(thd->lex, &mysql_derived_prepare) || - (thd->fill_derived_tables() && - mysql_handle_derived(thd->lex, &mysql_derived_filling))) + mysql_handle_derived(thd->lex, &mysql_derived_prepare)) DBUG_RETURN(1); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* TABLE_LIST contain right privilages request */ + want_privilege= table_list->grant.want_privilege; +#endif + if (!(res= mysql_prepare_update(thd, table_list, &select->where, select->order_list.elements, (ORDER *) select->order_list.first))) { +#ifndef NO_EMBEDDED_ACCESS_CHECKS + table_list->grant.want_privilege= + table_list->table->grant.want_privilege= + want_privilege; +#endif thd->lex->select_lex.no_wrap_view_item= 1; if (setup_fields(thd, 0, table_list, select->item_list, 1, 0, 0)) { @@ -1005,6 +1036,12 @@ static int mysql_test_update(Prepared_statement *stmt, else { thd->lex->select_lex.no_wrap_view_item= 0; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* Check values */ + table_list->grant.want_privilege= + table_list->table->grant.want_privilege= + (SELECT_ACL & ~table_list->table->grant.privilege); +#endif if 
(setup_fields(thd, 0, table_list, stmt->lex->value_list, 0, 0, 0)) res= 1; @@ -1385,13 +1422,40 @@ static int mysql_test_multidelete(Prepared_statement *stmt, } +/* + Wrapper for mysql_insert_select_prepare, to make change of local tables + after open_and_lock_tables() call. + + SYNOPSIS + mysql_insert_select_prepare_tester() + thd thread handler + + NOTE: we need remove first local tables after open_and_lock_tables, + because mysql_handle_derived use local tables lists +*/ + +static bool mysql_insert_select_prepare_tester(THD *thd) +{ + SELECT_LEX *first_select= &thd->lex->select_lex; + /* Skip first table, which is the table we are inserting in */ + first_select->table_list.first= (byte*)((TABLE_LIST*)first_select-> + table_list.first)->next_local; + /* + insert/replace from SELECT give its SELECT_LEX for SELECT, + and item_list belong to SELECT + */ + first_select->resolve_mode= SELECT_LEX::SELECT_MODE; + return mysql_insert_select_prepare(thd); +} + + /* Validate and prepare for execution INSERT ... 
SELECT statement SYNOPSIS mysql_test_insert_select() stmt prepared statemen handler - tables list of tables queries + tables list of tables of query RETURN VALUE 0 success @@ -1406,26 +1470,23 @@ static int mysql_test_insert_select(Prepared_statement *stmt, LEX *lex= stmt->lex; TABLE_LIST *first_local_table; - if ((res= insert_precheck(stmt->thd, tables))) - return res; - first_local_table= (TABLE_LIST *)lex->select_lex.table_list.first; - DBUG_ASSERT(first_local_table != 0); - /* Skip first table, which is the table we are inserting in */ - lex->select_lex.table_list.first= (byte*) first_local_table->next_local; if (tables->table) { // don't allocate insert_values tables->table->insert_values=(byte *)1; } - /* - insert/replace from SELECT give its SELECT_LEX for SELECT, - and item_list belong to SELECT - */ - lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE; - res= select_like_statement_test(stmt, tables, &mysql_insert_select_prepare, + if ((res= insert_precheck(stmt->thd, tables))) + return res; + + /* store it, because mysql_insert_select_prepare_tester change it */ + first_local_table= (TABLE_LIST *)lex->select_lex.table_list.first; + DBUG_ASSERT(first_local_table != 0); + + res= select_like_statement_test(stmt, tables, + &mysql_insert_select_prepare_tester, OPTION_SETUP_TABLES_DONE); - /* revert changes*/ + /* revert changes made by mysql_insert_select_prepare_tester */ lex->select_lex.table_list.first= (byte*) first_local_table; lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; return res; diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index 1640fc1a634..8bc1891ef1b 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -149,8 +149,8 @@ rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error) } else { - old_alias= ren_table->real_name; - new_alias= new_table->real_name; + old_alias= ren_table->table_name; + new_alias= new_table->table_name; } sprintf(name,"%s/%s/%s%s",mysql_data_home, new_table->db, new_alias, reg_ext); diff --git 
a/sql/sql_repl.cc b/sql/sql_repl.cc index 9f083e19146..8ba92015535 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1510,7 +1510,7 @@ err: int log_loaded_block(IO_CACHE* file) { - LOAD_FILE_INFO* lf_info; + LOAD_FILE_INFO *lf_info; uint block_len ; /* file->request_pos contains position where we started last read */ @@ -1532,7 +1532,7 @@ int log_loaded_block(IO_CACHE* file) { Create_file_log_event c(lf_info->thd,lf_info->ex,lf_info->db, lf_info->table_name, *lf_info->fields, - lf_info->handle_dup, buffer, + lf_info->handle_dup, lf_info->ignore, buffer, block_len, lf_info->log_delayed); mysql_bin_log.write(&c); lf_info->wrote_create_file = 1; diff --git a/sql/sql_repl.h b/sql/sql_repl.h index 71b25548da4..e8497fee343 100644 --- a/sql/sql_repl.h +++ b/sql/sql_repl.h @@ -67,7 +67,7 @@ typedef struct st_load_file_info enum enum_duplicates handle_dup; char* db; char* table_name; - bool wrote_create_file, log_delayed; + bool wrote_create_file, log_delayed, ignore; } LOAD_FILE_INFO; int log_loaded_block(IO_CACHE* file); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 0da5305e374..5d8ec56c273 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -529,7 +529,7 @@ JOIN::optimize() optimized= 1; // Ignore errors of execution if option IGNORE present - if (thd->lex->duplicates == DUP_IGNORE) + if (thd->lex->ignore) thd->lex->current_select->no_error= 1; #ifdef HAVE_REF_TO_FIELDS // Not done yet /* Add HAVING to WHERE if possible */ @@ -2152,7 +2152,7 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds, table->quick_keys.clear_all(); table->reginfo.join_tab=s; table->reginfo.not_exists_optimize=0; - bzero((char*) table->const_key_parts, sizeof(key_part_map)*table->keys); + bzero((char*) table->const_key_parts, sizeof(key_part_map)*table->s->keys); all_table_map|= table->map; s->join=join; s->info=0; // For describe @@ -2189,7 +2189,7 @@ make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds, continue; } - if ((table->system || 
table->file->records <= 1) && ! s->dependent && + if ((table->s->system || table->file->records <= 1) && ! s->dependent && !(table->file->table_flags() & HA_NOT_EXACT_COUNT) && !table->fulltext_searched) { @@ -2915,7 +2915,7 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array,KEY_FIELD *key_field) if (key_field->eq_func && !(key_field->optimize & KEY_OPTIMIZE_EXISTS)) { - for (uint key=0 ; key < form->keys ; key++) + for (uint key=0 ; key < form->s->keys ; key++) { if (!(form->keys_in_use_for_query.is_set(key))) continue; @@ -3427,8 +3427,8 @@ best_access_path(JOIN *join, records= ((double) s->records / (double) rec * (1.0 + - ((double) (table->max_key_length-keyinfo->key_length) / - (double) table->max_key_length))); + ((double) (table->s->max_key_length-keyinfo->key_length) / + (double) table->s->max_key_length))); if (records < 2.0) records=2.0; /* Can't be as good as a unique */ } @@ -4412,8 +4412,8 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, records= ((double) s->records / (double) rec * (1.0 + - ((double) (table->max_key_length-keyinfo->key_length) / - (double) table->max_key_length))); + ((double) (table->s->max_key_length-keyinfo->key_length) / + (double) table->s->max_key_length))); if (records < 2.0) records=2.0; // Can't be as good as a unique } @@ -4689,13 +4689,13 @@ static void calc_used_field_length(THD *thd, JOIN_TAB *join_tab) } } if (null_fields) - rec_length+=(join_tab->table->null_fields+7)/8; + rec_length+=(join_tab->table->s->null_fields+7)/8; if (join_tab->table->maybe_null) rec_length+=sizeof(my_bool); if (blobs) { uint blob_length=(uint) (join_tab->table->file->mean_rec_length- - (join_tab->table->reclength- rec_length)); + (join_tab->table->s->reclength- rec_length)); rec_length+=(uint) max(4,blob_length); } join_tab->used_fields=fields; @@ -5281,7 +5281,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) } if (tmp || !cond) { - DBUG_EXECUTE("where",print_where(tmp,tab->table->table_name);); + 
DBUG_EXECUTE("where",print_where(tmp,tab->table->alias);); SQL_SELECT *sel=tab->select=(SQL_SELECT*) join->thd->memdup((gptr) select, sizeof(SQL_SELECT)); if (!sel) @@ -7589,7 +7589,7 @@ static Field* create_tmp_field_from_field(THD *thd, Field* org_field, new_field->flags&= ~NOT_NULL_FLAG; // Because of outer join if (org_field->type() == MYSQL_TYPE_VAR_STRING || org_field->type() == MYSQL_TYPE_VARCHAR) - table->db_create_options|= HA_OPTION_PACK_RECORD; + table->s->db_create_options|= HA_OPTION_PACK_RECORD; } return new_field; } @@ -7805,7 +7805,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, char *tmpname,path[FN_REFLEN], filename[FN_REFLEN]; byte *pos,*group_buff; uchar *null_flags; - Field **reg_field, **from_field, **blob_field; + Field **reg_field, **from_field; + uint *blob_field; Copy_field *copy=0; KEY *keyinfo; KEY_PART_INFO *key_part_info; @@ -7854,7 +7855,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, if (!my_multi_malloc(MYF(MY_WME), &table,sizeof(*table), ®_field, sizeof(Field*)*(field_count+1), - &blob_field, sizeof(Field*)*(field_count+1), + &blob_field, sizeof(uint)*(field_count+1), &from_field, sizeof(Field*)*field_count, ©_func,sizeof(*copy_func)*(param->func_count+1), ¶m->keyinfo,sizeof(*param->keyinfo), @@ -7884,26 +7885,31 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, bzero((char*) reg_field,sizeof(Field*)*(field_count+1)); bzero((char*) from_field,sizeof(Field*)*field_count); table->field=reg_field; - table->blob_field= (Field_blob**) blob_field; - table->real_name=table->path=tmpname; - table->table_name= table_alias; + table->alias= table_alias; table->reginfo.lock_type=TL_WRITE; /* Will be updated */ table->db_stat=HA_OPEN_KEYFILE+HA_OPEN_RNDFILE; - table->blob_ptr_size=mi_portable_sizeof_char_ptr; table->map=1; - table->tmp_table= TMP_TABLE; - table->db_low_byte_first=1; // True for HEAP and MyISAM table->temp_pool_slot = temp_pool_slot; table->copy_blobs= 1; table->in_use= 
thd; - table->table_charset= param->table_charset; - table->keys_for_keyread.init(); - table->keys_in_use.init(); - table->read_only_keys.init(); table->quick_keys.init(); table->used_keys.init(); table->keys_in_use_for_query.init(); + table->s= &table->share_not_to_be_used; + table->s->blob_field= blob_field; + table->s->table_name= table->s->path= tmpname; + table->s->db= ""; + table->s->blob_ptr_size= mi_portable_sizeof_char_ptr; + table->s->tmp_table= TMP_TABLE; + table->s->db_low_byte_first=1; // True for HEAP and MyISAM + table->s->table_charset= param->table_charset; + table->s->keys_for_keyread.init(); + table->s->keys_in_use.init(); + /* For easier error reporting */ + table->s->table_cache_key= (char*) table->s->db= ""; + + /* Calculate which type of fields we will store in the temporary table */ reclength= string_total_length= 0; @@ -7947,13 +7953,13 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, if (!new_field) goto err; // Should be OOM tmp_from_field++; - *(reg_field++)= new_field; reclength+=new_field->pack_length(); if (new_field->flags & BLOB_FLAG) { - *blob_field++= new_field; + *blob_field++= (uint) (reg_field - table->field); blob_count++; } + *(reg_field++)= new_field; if (new_field->real_type() == MYSQL_TYPE_STRING || new_field->real_type() == MYSQL_TYPE_VARCHAR) { @@ -8005,7 +8011,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, total_uneven_bit_length+= new_field->field_length & 7; if (new_field->flags & BLOB_FLAG) { - *blob_field++= new_field; + *blob_field++= (uint) (reg_field - table->field); blob_count++; } if (item->marker == 4 && item->maybe_null) @@ -8027,7 +8033,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, (select_options & (OPTION_BIG_TABLES | SELECT_SMALL_RESULT)) == OPTION_BIG_TABLES) { - table->file=get_new_handler(table,table->db_type=DB_TYPE_MYISAM); + table->file=get_new_handler(table,table->s->db_type= DB_TYPE_MYISAM); if (group && (param->group_parts > 
table->file->max_key_parts() || param->group_length > table->file->max_key_length())) @@ -8035,13 +8041,13 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, } else { - table->file=get_new_handler(table,table->db_type=DB_TYPE_HEAP); + table->file=get_new_handler(table,table->s->db_type= DB_TYPE_HEAP); } if (!using_unique_constraint) reclength+= group_null_items; // null flag is stored separately - table->blob_fields=blob_count; + table->s->blob_fields= blob_count; if (blob_count == 0) { /* We need to ensure that first byte is not 0 for the delete link */ @@ -8063,15 +8069,15 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS)) use_packed_rows= 1; - table->fields=field_count; - table->reclength=reclength; + table->s->fields= field_count; + table->s->reclength= reclength; { uint alloc_length=ALIGN_SIZE(reclength+MI_UNIQUE_HASH_LENGTH+1); - table->rec_buff_length=alloc_length; + table->s->rec_buff_length= alloc_length; if (!(table->record[0]= (byte *) my_malloc(alloc_length*3, MYF(MY_WME)))) goto err; table->record[1]= table->record[0]+alloc_length; - table->default_values= table->record[1]+alloc_length; + table->s->default_values= table->record[1]+alloc_length; } copy_func[0]=0; // End marker @@ -8087,8 +8093,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, bfill(null_flags,null_pack_length,255); // Set null fields table->null_flags= (uchar*) table->record[0]; - table->null_fields= null_count+ hidden_null_count; - table->null_bytes= null_pack_length; + table->s->null_fields= null_count+ hidden_null_count; + table->s->null_bytes= null_pack_length; } null_count= (blob_count == 0) ? 
1 : 0; hidden_field_count=param->hidden_field_count; @@ -8146,30 +8152,30 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, null_count=(null_count+7) & ~7; // move to next byte // fix table name in field entry - field->table_name= table->table_name; + field->table_name= &table->alias; } param->copy_field_end=copy; param->recinfo=recinfo; - store_record(table,default_values); // Make empty default record + store_record(table,s->default_values); // Make empty default record if (thd->variables.tmp_table_size == ~(ulong) 0) // No limit - table->max_rows= ~(ha_rows) 0; + table->s->max_rows= ~(ha_rows) 0; else - table->max_rows=(((table->db_type == DB_TYPE_HEAP) ? - min(thd->variables.tmp_table_size, - thd->variables.max_heap_table_size) : - thd->variables.tmp_table_size)/ table->reclength); - set_if_bigger(table->max_rows,1); // For dummy start options - keyinfo=param->keyinfo; + table->s->max_rows= (((table->s->db_type == DB_TYPE_HEAP) ? + min(thd->variables.tmp_table_size, + thd->variables.max_heap_table_size) : + thd->variables.tmp_table_size)/ table->s->reclength); + set_if_bigger(table->s->max_rows,1); // For dummy start options + keyinfo= param->keyinfo; if (group) { DBUG_PRINT("info",("Creating group key in temporary table")); table->group=group; /* Table is grouped by key */ param->group_buff=group_buff; - table->keys=1; - table->uniques= test(using_unique_constraint); + table->s->keys=1; + table->s->uniques= test(using_unique_constraint); table->key_info=keyinfo; keyinfo->key_part=key_part_info; keyinfo->flags=HA_NOSAME; @@ -8237,14 +8243,14 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, null_pack_length-=hidden_null_pack_length; keyinfo->key_parts= ((field_count-param->hidden_field_count)+ test(null_pack_length)); - set_if_smaller(table->max_rows, rows_limit); + set_if_smaller(table->s->max_rows, rows_limit); param->end_write_records= rows_limit; - table->distinct=1; - table->keys=1; + table->distinct= 1; + table->s->keys= 1; 
if (blob_count) { using_unique_constraint=1; - table->uniques=1; + table->s->uniques= 1; } if (!(key_part_info= (KEY_PART_INFO*) sql_calloc((keyinfo->key_parts)*sizeof(KEY_PART_INFO)))) @@ -8289,23 +8295,16 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, } if (thd->is_fatal_error) // If end of memory goto err; /* purecov: inspected */ - table->db_record_offset=1; - if (table->db_type == DB_TYPE_MYISAM) + table->s->db_record_offset= 1; + if (table->s->db_type == DB_TYPE_MYISAM) { if (create_myisam_tmp_table(table,param,select_options)) goto err; } - /* Set table_name for easier debugging */ - table->table_name= base_name(tmpname); if (!open_tmp_table(table)) DBUG_RETURN(table); err: - /* - Hack to ensure that free_blobs() doesn't fail if blob_field is not yet - complete - */ - *table->blob_field= 0; free_tmp_table(thd,table); /* purecov: inspected */ bitmap_clear_bit(&temp_pool, temp_pool_slot); DBUG_RETURN(NULL); /* purecov: inspected */ @@ -8315,7 +8314,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List &fields, static bool open_tmp_table(TABLE *table) { int error; - if ((error=table->file->ha_open(table->real_name,O_RDWR,HA_OPEN_TMP_TABLE))) + if ((error=table->file->ha_open(table->s->table_name,O_RDWR, + HA_OPEN_TMP_TABLE))) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ table->db_stat=0; @@ -8335,7 +8335,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, KEY *keyinfo=param->keyinfo; DBUG_ENTER("create_myisam_tmp_table"); - if (table->keys) + if (table->s->keys) { // Get keys for ni_create bool using_unique_constraint=0; HA_KEYSEG *seg= (HA_KEYSEG*) sql_calloc(sizeof(*seg) * @@ -8345,11 +8345,11 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, if (keyinfo->key_length >= table->file->max_key_length() || keyinfo->key_parts > table->file->max_key_parts() || - table->uniques) + table->s->uniques) { /* Can't create a key; Make a unique constraint instead of a key */ - 
table->keys=0; - table->uniques=1; + table->s->keys= 0; + table->s->uniques= 1; using_unique_constraint=1; bzero((char*) &uniquedef,sizeof(uniquedef)); uniquedef.keysegs=keyinfo->key_parts; @@ -8361,7 +8361,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, param->recinfo->type= FIELD_CHECK; param->recinfo->length=MI_UNIQUE_HASH_LENGTH; param->recinfo++; - table->reclength+=MI_UNIQUE_HASH_LENGTH; + table->s->reclength+=MI_UNIQUE_HASH_LENGTH; } else { @@ -8383,7 +8383,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, seg->type= ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ? HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2); - seg->bit_start= field->pack_length() - table->blob_ptr_size; + seg->bit_start= field->pack_length() - table->s->blob_ptr_size; seg->flag= HA_BLOB_PART; seg->length=0; // Whole blob in unique constraint } @@ -8416,10 +8416,10 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, OPTION_BIG_TABLES) create_info.data_file_length= ~(ulonglong) 0; - if ((error=mi_create(table->real_name,table->keys,&keydef, + if ((error=mi_create(table->s->table_name,table->s->keys,&keydef, (uint) (param->recinfo-param->start_recinfo), param->start_recinfo, - table->uniques, &uniquedef, + table->s->uniques, &uniquedef, &create_info, HA_CREATE_TMP_TABLE))) { @@ -8429,7 +8429,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, } statistic_increment(table->in_use->status_var.created_tmp_disk_tables, &LOCK_status); - table->db_record_offset=1; + table->s->db_record_offset= 1; DBUG_RETURN(0); err: DBUG_RETURN(1); @@ -8441,7 +8441,7 @@ free_tmp_table(THD *thd, TABLE *entry) { const char *save_proc_info; DBUG_ENTER("free_tmp_table"); - DBUG_PRINT("enter",("table: %s",entry->table_name)); + DBUG_PRINT("enter",("table: %s",entry->alias)); save_proc_info=thd->proc_info; thd->proc_info="removing tmp table"; @@ -8457,8 +8457,9 @@ free_tmp_table(THD *thd, TABLE *entry) here 
and we have to ensure that delete_table gets the table name in the original case. */ - if (!(test_flags & TEST_KEEP_TMP_TABLES) || entry->db_type == DB_TYPE_HEAP) - entry->file->delete_table(entry->real_name); + if (!(test_flags & TEST_KEEP_TMP_TABLES) || + entry->s->db_type == DB_TYPE_HEAP) + entry->file->delete_table(entry->s->table_name); delete entry->file; } @@ -8488,14 +8489,15 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, int write_err; DBUG_ENTER("create_myisam_from_heap"); - if (table->db_type != DB_TYPE_HEAP || error != HA_ERR_RECORD_FILE_FULL) + if (table->s->db_type != DB_TYPE_HEAP || error != HA_ERR_RECORD_FILE_FULL) { table->file->print_error(error,MYF(0)); DBUG_RETURN(1); } new_table= *table; - new_table.db_type=DB_TYPE_MYISAM; - if (!(new_table.file=get_new_handler(&new_table,DB_TYPE_MYISAM))) + new_table.s= &new_table.share_not_to_be_used; + new_table.s->db_type= DB_TYPE_MYISAM; + if (!(new_table.file= get_new_handler(&new_table,DB_TYPE_MYISAM))) DBUG_RETURN(1); // End of memory save_proc_info=thd->proc_info; @@ -8545,10 +8547,11 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, /* remove heap table and change to use myisam table */ (void) table->file->ha_rnd_end(); (void) table->file->close(); - (void) table->file->delete_table(table->real_name); + (void) table->file->delete_table(table->s->table_name); delete table->file; table->file=0; - *table =new_table; + *table= new_table; + table->s= &table->share_not_to_be_used; table->file->change_table_ptr(table); thd->proc_info= (!strcmp(save_proc_info,"Copying to tmp table") ? 
"Copying to tmp table on disk" : save_proc_info); @@ -8560,7 +8563,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, (void) table->file->ha_rnd_end(); (void) new_table.file->close(); err1: - new_table.file->delete_table(new_table.real_name); + new_table.file->delete_table(new_table.s->table_name); delete new_table.file; err2: thd->proc_info=save_proc_info; @@ -8603,7 +8606,7 @@ do_select(JOIN *join,List *fields,TABLE *table,Procedure *procedure) { if (table->group && join->tmp_table_param.sum_func_count) { - if (table->keys) + if (table->s->keys) { DBUG_PRINT("info",("Using end_update")); end_select=end_update; @@ -9006,7 +9009,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) join_tab->found= 1; join_tab->not_null_compl= 0; /* The outer row is complemented by nulls for each inner tables */ - restore_record(join_tab->table,default_values); // Make empty record + restore_record(join_tab->table,s->default_values); // Make empty record mark_as_null_row(join_tab->table); // For group by without error select_cond= join_tab->select_cond; /* Check all attached conditions for inner table rows. 
*/ @@ -9141,7 +9144,7 @@ int report_error(TABLE *table, int error) */ if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) sql_print_error("Got error %d when reading table '%s'", - error, table->path); + error, table->s->path); table->file->print_error(error,MYF(0)); return 1; } @@ -9223,7 +9226,7 @@ join_read_system(JOIN_TAB *tab) if (table->status & STATUS_GARBAGE) // If first read { if ((error=table->file->read_first_row(table->record[0], - table->primary_key))) + table->s->primary_key))) { if (error != HA_ERR_END_OF_FILE) return report_error(table, error); @@ -9807,7 +9810,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param, error,1)) DBUG_RETURN(-1); // Not a table_is_full error - table->uniques=0; // To ensure rows are the same + table->s->uniques=0; // To ensure rows are the same } if (++join->send_records >= join->tmp_table_param.end_write_records && join->do_send_rows) @@ -10246,7 +10249,7 @@ uint find_shortest_key(TABLE *table, const key_map *usable_keys) uint best= MAX_KEY; if (!usable_keys->is_clear_all()) { - for (uint nr=0; nr < table->keys ; nr++) + for (uint nr=0; nr < table->s->keys ; nr++) { if (usable_keys->is_set(nr)) { @@ -10312,7 +10315,7 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts, KEY_PART_INFO *ref_key_part= table->key_info[ref].key_part; KEY_PART_INFO *ref_key_part_end= ref_key_part + ref_key_parts; - for (nr= 0 ; nr < table->keys ; nr++) + for (nr= 0 ; nr < table->s->keys ; nr++) { if (usable_keys->is_set(nr) && table->key_info[nr].key_length < min_length && @@ -10513,7 +10516,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, else keys= usable_keys; - for (nr=0; nr < table->keys ; nr++) + for (nr=0; nr < table->s->keys ; nr++) { uint not_used; if (keys.is_set(nr)) @@ -10627,7 +10630,7 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, goto err; } } - if 
(table->tmp_table) + if (table->s->tmp_table) table->file->info(HA_STATUS_VARIABLE); // Get record count table->sort.found_records=filesort(thd, table,sortorder, length, select, filesort_limit, &examined_rows); @@ -10703,7 +10706,7 @@ static bool compare_record(TABLE *table, Field **ptr) { for (; *ptr ; ptr++) { - if ((*ptr)->cmp_offset(table->rec_buff_length)) + if ((*ptr)->cmp_offset(table->s->rec_buff_length)) return 1; } return 0; @@ -10756,14 +10759,14 @@ remove_duplicates(JOIN *join, TABLE *entry,List &fields, Item *having) join->unit->select_limit_cnt= 1; // Only send first row DBUG_RETURN(0); } - Field **first_field=entry->field+entry->fields - field_count; - offset=entry->field[entry->fields - field_count]->offset(); - reclength=entry->reclength-offset; + Field **first_field=entry->field+entry->s->fields - field_count; + offset=entry->field[entry->s->fields - field_count]->offset(); + reclength=entry->s->reclength-offset; free_io_cache(entry); // Safety entry->file->info(HA_STATUS_VARIABLE); - if (entry->db_type == DB_TYPE_HEAP || - (!entry->blob_fields && + if (entry->s->db_type == DB_TYPE_HEAP || + (!entry->s->blob_fields && ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->file->records < thd->variables.sortbuff_size))) error=remove_dup_with_hash_index(join->thd, entry, @@ -10785,7 +10788,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, char *org_record,*new_record; byte *record; int error; - ulong reclength=table->reclength-offset; + ulong reclength= table->s->reclength-offset; DBUG_ENTER("remove_dup_with_compare"); org_record=(char*) (record=table->record[0])+offset; @@ -11061,10 +11064,10 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count) } } /* Copy null bits from table */ - if (null_fields && tables[i].table->null_fields) + if (null_fields && tables[i].table->s->null_fields) { /* must copy null bits */ copy->str=(char*) tables[i].table->null_flags; - copy->length=tables[i].table->null_bytes; + 
copy->length= tables[i].table->s->null_bytes; copy->strip=0; copy->blob_field=0; length+=copy->length; @@ -12693,8 +12696,8 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, item_list.push_back(new Item_string(table_name_buffer, len, cs)); } else - item_list.push_back(new Item_string(table->table_name, - strlen(table->table_name), + item_list.push_back(new Item_string(table->alias, + strlen(table->alias), cs)); /* type */ item_list.push_back(new Item_string(join_type_str[tab->type], @@ -12704,7 +12707,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, if (!tab->keys.is_clear_all()) { uint j; - for (j=0 ; j < table->keys ; j++) + for (j=0 ; j < table->s->keys ; j++) { if (tab->keys.is_set(j)) { @@ -13002,8 +13005,8 @@ void st_table_list::print(THD *thd, String *str) } else { - append_identifier(thd, str, real_name, real_name_length); - cmp_name= real_name; + append_identifier(thd, str, table_name, table_name_length); + cmp_name= table_name; } } if (my_strcasecmp(table_alias_charset, cmp_name, alias)) diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 6ffad1e2bff..1e284891b90 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -311,7 +311,7 @@ mysql_find_files(THD *thd,List *files, const char *db,const char *path, if (db && !(col_access & TABLE_ACLS)) { table_list.db= (char*) db; - table_list.real_name=file->name; + table_list.table_name= file->name; table_list.grant.privilege=col_access; if (check_grant(thd, TABLE_ACLS, &table_list, 1, UINT_MAX, 1)) continue; @@ -342,7 +342,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) int res; DBUG_ENTER("mysqld_show_create"); DBUG_PRINT("enter",("db: %s table: %s",table_list->db, - table_list->real_name)); + table_list->table_name)); /* Only one table for now, but VIEW can involve several tables */ if (open_and_lock_tables(thd, table_list)) @@ -353,7 +353,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) if (thd->lex->only_view && 
!table_list->view) { my_error(ER_WRONG_OBJECT, MYF(0), - table_list->db, table_list->real_name, "VIEW"); + table_list->db, table_list->table_name, "VIEW"); DBUG_RETURN(TRUE); } @@ -395,7 +395,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list) if (table_list->schema_table) protocol->store(table_list->schema_table_name, system_charset_info); else - protocol->store(table->table_name, system_charset_info); + protocol->store(table->alias, system_charset_info); if (store_create_info(thd, table_list, &buffer)) DBUG_RETURN(TRUE); } @@ -535,7 +535,7 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild) TABLE *table; int res; DBUG_ENTER("mysqld_list_fields"); - DBUG_PRINT("enter",("table: %s",table_list->real_name)); + DBUG_PRINT("enter",("table: %s",table_list->table_name)); table_list->lock_type= TL_UNLOCK; if (open_and_lock_tables(thd, table_list)) @@ -551,7 +551,7 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild) !wild_case_compare(system_charset_info, field->field_name,wild)) field_list.push_back(new Item_field(field)); } - restore_record(table,default_values); // Get empty record + restore_record(table, s->default_values); // Get empty record if (thd->protocol->send_fields(&field_list, Protocol::SEND_DEFAULTS | Protocol::SEND_EOF)) DBUG_VOID_RETURN; @@ -566,7 +566,7 @@ mysqld_dump_create_info(THD *thd, TABLE_LIST *table_list, int fd) Protocol *protocol= thd->protocol; String *packet= protocol->storage_packet(); DBUG_ENTER("mysqld_dump_create_info"); - DBUG_PRINT("enter",("table: %s",table_list->table->real_name)); + DBUG_PRINT("enter",("table: %s",table_list->table->s->table_name)); protocol->prepare_for_resend(); if (store_create_info(thd, table_list, packet)) @@ -715,13 +715,15 @@ static int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet) { List field_list; - char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], *end, *alias; + char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], *end; + const char *alias; 
String type(tmp, sizeof(tmp), system_charset_info); Field **ptr,*field; uint primary_key; KEY *key_info; TABLE *table= table_list->table; handler *file= table->file; + TABLE_SHARE *share= table->s; HA_CREATE_INFO create_info; my_bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL | MODE_ORACLE | @@ -732,21 +734,20 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet) my_bool limited_mysql_mode= (thd->variables.sql_mode & (MODE_NO_FIELD_OPTIONS | MODE_MYSQL323 | MODE_MYSQL40)) != 0; - DBUG_ENTER("store_create_info"); - DBUG_PRINT("enter",("table: %s",table->real_name)); + DBUG_PRINT("enter",("table: %s", table->s->table_name)); - restore_record(table,default_values); // Get empty record + restore_record(table, s->default_values); // Get empty record - if (table->tmp_table) + if (share->tmp_table) packet->append("CREATE TEMPORARY TABLE ", 23); else packet->append("CREATE TABLE ", 13); if (table_list->schema_table) alias= table_list->schema_table_name; else - alias= (lower_case_table_names == 2 ? table->table_name : - table->real_name); + alias= (lower_case_table_names == 2 ? 
table->alias : + share->table_name); append_identifier(thd, packet, alias, strlen(alias)); packet->append(" (\n", 3); @@ -773,7 +774,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet) if (field->has_charset() && !limited_mysql_mode && !foreign_db_mode) { - if (field->charset() != table->table_charset) + if (field->charset() != share->table_charset) { packet->append(" character set ", 15); packet->append(field->charset()->csname); @@ -859,9 +860,9 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet) file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME); bzero((char*) &create_info, sizeof(create_info)); file->update_create_info(&create_info); - primary_key= table->primary_key; + primary_key= share->primary_key; - for (uint i=0 ; i < table->keys ; i++,key_info++) + for (uint i=0 ; i < share->keys ; i++,key_info++) { KEY_PART_INFO *key_part= key_info->key_part; bool found_primary=0; @@ -886,7 +887,7 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet) if (!(thd->variables.sql_mode & MODE_NO_KEY_OPTIONS) && !limited_mysql_mode && !foreign_db_mode) { - if (table->db_type == DB_TYPE_HEAP && + if (share->db_type == DB_TYPE_HEAP && key_info->algorithm == HA_KEY_ALG_BTREE) packet->append(" TYPE BTREE", 11); @@ -941,58 +942,58 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet) packet->append(" ENGINE=", 8); packet->append(file->table_type()); - if (table->table_charset && + if (share->table_charset && !(thd->variables.sql_mode & MODE_MYSQL323) && !(thd->variables.sql_mode & MODE_MYSQL40)) { packet->append(" DEFAULT CHARSET=", 17); - packet->append(table->table_charset->csname); - if (!(table->table_charset->state & MY_CS_PRIMARY)) + packet->append(share->table_charset->csname); + if (!(share->table_charset->state & MY_CS_PRIMARY)) { packet->append(" COLLATE=", 9); - packet->append(table->table_charset->name); + packet->append(table->s->table_charset->name); } } - if (table->min_rows) + 
if (share->min_rows) { packet->append(" MIN_ROWS=", 10); - end= longlong10_to_str(table->min_rows, buff, 10); + end= longlong10_to_str(share->min_rows, buff, 10); packet->append(buff, (uint) (end- buff)); } - if (table->max_rows) + if (share->max_rows) { packet->append(" MAX_ROWS=", 10); - end= longlong10_to_str(table->max_rows, buff, 10); + end= longlong10_to_str(share->max_rows, buff, 10); packet->append(buff, (uint) (end - buff)); } - if (table->avg_row_length) + if (share->avg_row_length) { packet->append(" AVG_ROW_LENGTH=", 16); - end= longlong10_to_str(table->avg_row_length, buff,10); + end= longlong10_to_str(share->avg_row_length, buff,10); packet->append(buff, (uint) (end - buff)); } - if (table->db_create_options & HA_OPTION_PACK_KEYS) + if (share->db_create_options & HA_OPTION_PACK_KEYS) packet->append(" PACK_KEYS=1", 12); - if (table->db_create_options & HA_OPTION_NO_PACK_KEYS) + if (share->db_create_options & HA_OPTION_NO_PACK_KEYS) packet->append(" PACK_KEYS=0", 12); - if (table->db_create_options & HA_OPTION_CHECKSUM) + if (share->db_create_options & HA_OPTION_CHECKSUM) packet->append(" CHECKSUM=1", 11); - if (table->db_create_options & HA_OPTION_DELAY_KEY_WRITE) + if (share->db_create_options & HA_OPTION_DELAY_KEY_WRITE) packet->append(" DELAY_KEY_WRITE=1",18); - if (table->row_type != ROW_TYPE_DEFAULT) + if (share->row_type != ROW_TYPE_DEFAULT) { packet->append(" ROW_FORMAT=",12); - packet->append(ha_row_type[(uint) table->row_type]); + packet->append(ha_row_type[(uint) share->row_type]); } table->file->append_create_info(packet); - if (table->comment && table->comment[0]) + if (share->comment && share->comment[0]) { packet->append(" COMMENT=", 9); - append_unescaped(packet, table->comment, strlen(table->comment)); + append_unescaped(packet, share->comment, strlen(share->comment)); } if (file->raid_type) { @@ -1516,7 +1517,7 @@ static bool show_status_array(THD *thd, const char *wild, default: break; } - restore_record(table, default_values); + 
restore_record(table, s->default_values); table->field[0]->store(name_buffer, strlen(name_buffer), system_charset_info); table->field[1]->store(pos, (uint32) (end - pos), system_charset_info); @@ -1802,7 +1803,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) bool res= open_and_lock_tables(thd, show_table_list); if (schema_table->process_table(thd, show_table_list, table, res, show_table_list->db, - show_table_list->real_name)) + show_table_list->table_name)) { DBUG_RETURN(1); } @@ -1870,7 +1871,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) List_iterator_fast it(files); while ((file_name=it++)) { - restore_record(table, default_values); + restore_record(table, s->default_values); table->field[schema_table->idx_field1]-> store(base_name, strlen(base_name), system_charset_info); table->field[schema_table->idx_field2]-> @@ -1939,7 +1940,7 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) void store_schema_shemata(TABLE *table, const char *db_name, const char* cs_name) { - restore_record(table, default_values); + restore_record(table, s->default_values); table->field[1]->store(db_name, strlen(db_name), system_charset_info); table->field[2]->store(cs_name, strlen(cs_name), system_charset_info); table->file->write_row(table->record[0]); @@ -2009,9 +2010,9 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables, const char *tmp_buff; TIME time; CHARSET_INFO *cs= system_charset_info; - DBUG_ENTER("get_schema_tables_record"); - restore_record(table, default_values); + + restore_record(table, s->default_values); table->field[1]->store(base_name, strlen(base_name), cs); table->field[2]->store(file_name, strlen(file_name), cs); if (res) @@ -2031,9 +2032,11 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables, else { TABLE *show_table= tables->table; + TABLE_SHARE *share= show_table->s; handler *file= show_table->file; + file->info(HA_STATUS_VARIABLE | HA_STATUS_TIME | 
HA_STATUS_NO_LOCK); - if (show_table->tmp_table == TMP_TABLE) + if (share->tmp_table == TMP_TABLE) table->field[3]->store("TEMPORARY", 9, cs); else table->field[3]->store("BASE TABLE", 10, cs); @@ -2046,10 +2049,10 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables, } tmp_buff= file->table_type(); table->field[4]->store(tmp_buff, strlen(tmp_buff), cs); - table->field[5]->store((longlong) show_table->frm_version); - tmp_buff= ((show_table->db_options_in_use & + table->field[5]->store((longlong) share->frm_version); + tmp_buff= ((share->db_options_in_use & HA_OPTION_COMPRESS_RECORD) ? "Compressed" : - (show_table->db_options_in_use & HA_OPTION_PACK_RECORD) ? + (share->db_options_in_use & HA_OPTION_PACK_RECORD) ? "Dynamic" : "Fixed"); table->field[6]->store(tmp_buff, strlen(tmp_buff), cs); if (!tables->schema_table) @@ -2095,8 +2098,8 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables, table->field[16]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); table->field[16]->set_notnull(); } - tmp_buff= (show_table->table_charset ? show_table-> - table_charset->name : "default"); + tmp_buff= (share->table_charset ? 
+ share->table_charset->name : "default"); table->field[17]->store(tmp_buff, strlen(tmp_buff), cs); if (file->table_flags() & (ulong) HA_HAS_CHECKSUM) { @@ -2106,32 +2109,32 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables, char option_buff[350],*ptr; ptr=option_buff; - if (show_table->min_rows) + if (share->min_rows) { ptr=strmov(ptr," min_rows="); - ptr=longlong10_to_str(show_table->min_rows,ptr,10); + ptr=longlong10_to_str(share->min_rows,ptr,10); } - if (show_table->max_rows) + if (share->max_rows) { ptr=strmov(ptr," max_rows="); - ptr=longlong10_to_str(show_table->max_rows,ptr,10); + ptr=longlong10_to_str(share->max_rows,ptr,10); } - if (show_table->avg_row_length) + if (share->avg_row_length) { ptr=strmov(ptr," avg_row_length="); - ptr=longlong10_to_str(show_table->avg_row_length,ptr,10); + ptr=longlong10_to_str(share->avg_row_length,ptr,10); } - if (show_table->db_create_options & HA_OPTION_PACK_KEYS) + if (share->db_create_options & HA_OPTION_PACK_KEYS) ptr=strmov(ptr," pack_keys=1"); - if (show_table->db_create_options & HA_OPTION_NO_PACK_KEYS) + if (share->db_create_options & HA_OPTION_NO_PACK_KEYS) ptr=strmov(ptr," pack_keys=0"); - if (show_table->db_create_options & HA_OPTION_CHECKSUM) + if (share->db_create_options & HA_OPTION_CHECKSUM) ptr=strmov(ptr," checksum=1"); - if (show_table->db_create_options & HA_OPTION_DELAY_KEY_WRITE) + if (share->db_create_options & HA_OPTION_DELAY_KEY_WRITE) ptr=strmov(ptr," delay_key_write=1"); - if (show_table->row_type != ROW_TYPE_DEFAULT) + if (share->row_type != ROW_TYPE_DEFAULT) ptr=strxmov(ptr, " row_format=", - ha_row_type[(uint) show_table->row_type], + ha_row_type[(uint) share->row_type], NullS); if (file->raid_type) { @@ -2146,13 +2149,13 @@ static int get_schema_tables_record(THD *thd, struct st_table_list *tables, (ptr == option_buff ? 
0 : (uint) (ptr-option_buff)-1), cs); { - char *comment= show_table->file-> - update_table_comment(show_table->comment); + char *comment; + comment= show_table->file->update_table_comment(share->comment); if (comment) { table->field[20]->store(comment, strlen(comment), cs); - if (comment != show_table->comment) - my_free(comment,MYF(0)); + if (comment != share->comment) + my_free(comment, MYF(0)); } } } @@ -2190,7 +2193,7 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables, TABLE *show_table= tables->table; handler *file= show_table->file; file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); - restore_record(show_table, default_values); + restore_record(show_table, s->default_values); Field **ptr,*field; int count= 0; for (ptr=show_table->field; (field= *ptr) ; ptr++) @@ -2207,7 +2210,7 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables, String type(tmp,sizeof(tmp), system_charset_info); char tmp_buffer[128]; count++; - restore_record(table, default_values); + restore_record(table, s->default_values); table->field[1]->store(base_name, strlen(base_name), cs); table->field[2]->store(file_name, strlen(file_name), cs); table->field[3]->store(field->field_name, strlen(field->field_name), @@ -2317,7 +2320,7 @@ static int get_schema_column_record(THD *thd, struct st_table_list *tables, check_access(thd,SELECT_ACL | EXTRA_ACL, base_name, &tables->grant.privilege, 0, 0); col_access= get_column_grant(thd, &tables->grant, tables->db, - tables->real_name, + tables->table_name, field->field_name) & COL_ACLS; for (uint bitnr=0; col_access ; col_access>>=1,bitnr++) { @@ -2354,7 +2357,7 @@ int fill_schema_charsets(THD *thd, TABLE_LIST *tables, COND *cond) !(wild && wild[0] && wild_case_compare(scs, tmp_cs->csname,wild))) { - restore_record(table, default_values); + restore_record(table, s->default_values); table->field[0]->store(tmp_cs->csname, strlen(tmp_cs->csname), scs); table->field[1]->store(tmp_cs->name, 
strlen(tmp_cs->name), scs); table->field[2]->store(tmp_cs->comment ? tmp_cs->comment : "", @@ -2391,7 +2394,7 @@ int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond) wild_case_compare(scs, tmp_cl->name,wild))) { const char *tmp_buff; - restore_record(table, default_values); + restore_record(table, s->default_values); table->field[0]->store(tmp_cl->name, strlen(tmp_cl->name), scs); table->field[1]->store(tmp_cl->csname , strlen(tmp_cl->csname), scs); table->field[2]->store((longlong) tmp_cl->number); @@ -2427,7 +2430,7 @@ int fill_schema_coll_charset_app(THD *thd, TABLE_LIST *tables, COND *cond) if (!tmp_cl || !(tmp_cl->state & MY_CS_AVAILABLE) || !my_charset_same(tmp_cs,tmp_cl)) continue; - restore_record(table, default_values); + restore_record(table, s->default_values); table->field[0]->store(tmp_cl->name, strlen(tmp_cl->name), scs); table->field[1]->store(tmp_cl->csname , strlen(tmp_cl->csname), scs); table->file->write_row(table->record[0]); @@ -2445,7 +2448,7 @@ void store_schema_proc(THD *thd, TABLE *table, TIME time; LEX *lex= thd->lex; CHARSET_INFO *cs= system_charset_info; - restore_record(table, default_values); + restore_record(table, s->default_values); if (lex->orig_sql_command == SQLCOM_SHOW_STATUS_PROC && proc_table->field[2]->val_int() == TYPE_ENUM_PROCEDURE || lex->orig_sql_command == SQLCOM_SHOW_STATUS_FUNC && @@ -2520,7 +2523,7 @@ int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond) bzero((char*) &proc_tables,sizeof(proc_tables)); proc_tables.db= (char*) "mysql"; - proc_tables.real_name= proc_tables.alias= (char*) "proc"; + proc_tables.table_name= proc_tables.alias= (char*) "proc"; proc_tables.lock_type= TL_READ; if (!(proc_table= open_ltable(thd, &proc_tables, TL_READ))) { @@ -2573,13 +2576,13 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables, show_table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME); - for (uint i=0 ; i < show_table->keys ; i++,key_info++) + for (uint i=0 
; i < show_table->s->keys ; i++,key_info++) { KEY_PART_INFO *key_part= key_info->key_part; const char *str; for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) { - restore_record(table, default_values); + restore_record(table, s->default_values); table->field[1]->store(base_name, strlen(base_name), cs); table->field[2]->store(file_name, strlen(file_name), cs); table->field[3]->store((longlong) ((key_info->flags & @@ -2618,7 +2621,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables, table->field[12]->store(pos, strlen(pos), cs); pos= show_table->file->index_type(i); table->field[13]->store(pos, strlen(pos), cs); - if (!show_table->keys_in_use.is_set(i)) + if (!show_table->s->keys_in_use.is_set(i)) table->field[14]->store("disabled", 8, cs); else table->field[14]->store("", 0, cs); @@ -2642,9 +2645,10 @@ static int get_schema_views_record(THD *thd, struct st_table_list *tables, { if (tables->view) { - restore_record(table, default_values); + restore_record(table, s->default_values); table->field[1]->store(tables->view_db.str, tables->view_db.length, cs); - table->field[2]->store(tables->view_name.str,tables->view_name.length,cs); + table->field[2]->store(tables->view_name.str, tables->view_name.length, + cs); table->field[3]->store(tables->query.str, tables->query.length, cs); if (tables->with_check != VIEW_CHECK_NONE) @@ -2680,7 +2684,7 @@ void store_constraints(TABLE *table, const char*db, const char *tname, const char *con_type, uint con_len) { CHARSET_INFO *cs= system_charset_info; - restore_record(table, default_values); + restore_record(table, s->default_values); table->field[1]->store(db, strlen(db), cs); table->field[2]->store(key_name, key_len, cs); table->field[3]->store(db, strlen(db), cs); @@ -2709,11 +2713,11 @@ static int get_schema_constraints_record(THD *thd, struct st_table_list *tables, List f_key_list; TABLE *show_table= tables->table; KEY *key_info=show_table->key_info; - uint primary_key= show_table->primary_key; + 
uint primary_key= show_table->s->primary_key; show_table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME); - for (uint i=0 ; i < show_table->keys ; i++, key_info++) + for (uint i=0 ; i < show_table->s->keys ; i++, key_info++) { if (i != primary_key && !(key_info->flags & HA_NOSAME)) continue; @@ -2774,11 +2778,11 @@ static int get_schema_key_column_usage_record(THD *thd, List f_key_list; TABLE *show_table= tables->table; KEY *key_info=show_table->key_info; - uint primary_key= show_table->primary_key; + uint primary_key= show_table->s->primary_key; show_table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME); - for (uint i=0 ; i < show_table->keys ; i++, key_info++) + for (uint i=0 ; i < show_table->s->keys ; i++, key_info++) { if (i != primary_key && !(key_info->flags & HA_NOSAME)) continue; @@ -2790,7 +2794,7 @@ static int get_schema_key_column_usage_record(THD *thd, if (key_part->field) { f_idx++; - restore_record(table, default_values); + restore_record(table, s->default_values); store_key_column_usage(table, base_name, file_name, key_info->name, strlen(key_info->name), @@ -2815,7 +2819,7 @@ static int get_schema_key_column_usage_record(THD *thd, { r_info= it1++; f_idx++; - restore_record(table, default_values); + restore_record(table, s->default_values); store_key_column_usage(table, base_name, file_name, f_key_info->forein_id->str, f_key_info->forein_id->length, @@ -2843,7 +2847,7 @@ int fill_open_tables(THD *thd, TABLE_LIST *tables, COND *cond) for (; open_list ; open_list=open_list->next) { - restore_record(table, default_values); + restore_record(table, s->default_values); table->field[0]->store(open_list->db, strlen(open_list->db), cs); table->field[1]->store(open_list->table, strlen(open_list->table), cs); table->field[2]->store((longlong) open_list->in_use); @@ -3182,13 +3186,13 @@ int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list) { DBUG_RETURN(1); } - table->tmp_table= TMP_TABLE; + 
table->s->tmp_table= TMP_TABLE; table->grant.privilege= SELECT_ACL; table->alias_name_used= my_strcasecmp(table_alias_charset, - table_list->real_name, + table_list->table_name, table_list->alias); - table_list->schema_table_name= table_list->real_name; - table_list->real_name= table->real_name; + table_list->schema_table_name= table_list->table_name; + table_list->table_name= (char*) table->s->table_name; table_list->table= table; table->next= thd->derived_tables; thd->derived_tables= table; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index e8655a2a304..d7a272f804c 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -36,6 +36,7 @@ static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end); static int copy_data_between_tables(TABLE *from,TABLE *to, List &create, enum enum_duplicates handle_duplicates, + bool ignore, uint order_num, ORDER *order, ha_rows *copied,ha_rows *deleted); @@ -78,7 +79,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, { if (thd->global_read_lock) { - my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0), tables->real_name); + my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0), tables->table_name); error= TRUE; goto err; } @@ -193,7 +194,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, { char *db=table->db; mysql_ha_flush(thd, table, MYSQL_HA_CLOSE_FINAL); - if (!close_temporary_table(thd, db, table->real_name)) + if (!close_temporary_table(thd, db, table->table_name)) { tmp_table_deleted=1; continue; // removed temporary table @@ -202,17 +203,17 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, error=0; if (!drop_temporary) { - abort_locked_tables(thd,db,table->real_name); - while (remove_table_from_cache(thd,db,table->real_name) && !thd->killed) + abort_locked_tables(thd,db,table->table_name); + while (remove_table_from_cache(thd,db,table->table_name) && !thd->killed) { dropping_tables++; (void) pthread_cond_wait(&COND_refresh,&LOCK_open); 
dropping_tables--; } - drop_locked_tables(thd,db,table->real_name); + drop_locked_tables(thd,db,table->table_name); if (thd->killed) DBUG_RETURN(-1); - alias= (lower_case_table_names == 2) ? table->alias : table->real_name; + alias= (lower_case_table_names == 2) ? table->alias : table->table_name; /* remove form file and isam files */ strxmov(path, mysql_data_home, "/", db, "/", alias, reg_ext, NullS); (void) unpack_filename(path,path); @@ -224,7 +225,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, if (if_exists) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR), - table->real_name); + table->table_name); else error= 1; } @@ -253,7 +254,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, { if (wrong_tables.length()) wrong_tables.append(','); - wrong_tables.append(String(table->real_name,system_charset_info)); + wrong_tables.append(String(table->table_name,system_charset_info)); } } thd->tmp_table_used= tmp_table_deleted; @@ -1547,19 +1548,20 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, { TABLE tmp_table; // Used during 'create_field()' TABLE *table= 0; - tmp_table.table_name=0; uint select_field_count= items->elements; - DBUG_ENTER("create_table_from_items"); - /* Add selected items to field list */ List_iterator_fast it(*items); Item *item; Field *tmp_field; - tmp_table.db_create_options=0; + DBUG_ENTER("create_table_from_items"); + + tmp_table.alias= 0; + tmp_table.s= &tmp_table.share_not_to_be_used; + tmp_table.s->db_create_options=0; + tmp_table.s->blob_ptr_size= portable_sizeof_char_ptr; + tmp_table.s->db_low_byte_first= test(create_info->db_type == DB_TYPE_MYISAM || + create_info->db_type == DB_TYPE_HEAP); tmp_table.null_row=tmp_table.maybe_null=0; - tmp_table.blob_ptr_size=portable_sizeof_char_ptr; - tmp_table.db_low_byte_first= test(create_info->db_type == DB_TYPE_MYISAM || - create_info->db_type == DB_TYPE_HEAP); while 
((item=it++)) { @@ -1592,13 +1594,13 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, */ { tmp_disable_binlog(thd); - if (!mysql_create_table(thd, create_table->db, create_table->real_name, + if (!mysql_create_table(thd, create_table->db, create_table->table_name, create_info, *extra_fields, *keys, 0, select_field_count)) { if (!(table= open_table(thd, create_table, thd->mem_root, (bool*) 0))) quick_rm_table(create_info->db_type, create_table->db, - table_case_name(create_info, create_table->real_name)); + table_case_name(create_info, create_table->table_name)); } reenable_binlog(thd); if (!table) // open failed @@ -1612,7 +1614,7 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, hash_delete(&open_cache,(byte*) table); VOID(pthread_mutex_unlock(&LOCK_open)); quick_rm_table(create_info->db_type, create_table->db, - table_case_name(create_info, create_table->real_name)); + table_case_name(create_info, create_table->table_name)); DBUG_RETURN(0); } table->file->extra(HA_EXTRA_WRITE_CACHE); @@ -1695,7 +1697,7 @@ mysql_rename_table(enum db_type base, static void wait_while_table_is_used(THD *thd,TABLE *table, enum ha_extra_function function) { - DBUG_PRINT("enter",("table: %s", table->real_name)); + DBUG_PRINT("enter",("table: %s", table->s->table_name)); DBUG_ENTER("wait_while_table_is_used"); safe_mutex_assert_owner(&LOCK_open); @@ -1704,8 +1706,7 @@ static void wait_while_table_is_used(THD *thd,TABLE *table, mysql_lock_abort(thd, table); // end threads waiting on lock /* Wait until all there are no other threads that has this table open */ - while (remove_table_from_cache(thd,table->table_cache_key, - table->real_name)) + while (remove_table_from_cache(thd, table->s->db, table->s->table_name)) { dropping_tables++; (void) pthread_cond_wait(&COND_refresh,&LOCK_open); @@ -1782,7 +1783,7 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table, { char* backup_dir= thd->lex->backup_dir; char src_path[FN_REFLEN], 
dst_path[FN_REFLEN]; - char* table_name = table->real_name; + char* table_name = table->table_name; char* db = thd->db ? thd->db : table->db; if (fn_format_relative_to_data_home(src_path, table_name, backup_dir, @@ -1843,7 +1844,7 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list, { char name[FN_REFLEN]; strxmov(name, mysql_data_home, "/", table_list->db, "/", - table_list->real_name, NullS); + table_list->table_name, NullS); if (openfrm(thd, name, "", 0, 0, 0, &tmp_table)) DBUG_RETURN(0); // Can't open frm file table= &tmp_table; @@ -1870,7 +1871,7 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list, if (!ext[0] || !ext[1]) goto end; // No data file - strxmov(from, table->path, ext[1], NullS); // Name of data file + strxmov(from, table->s->path, ext[1], NullS); // Name of data file if (!my_stat(from, &stat_info, MYF(0))) goto end; // Can't use USE_FRM flag @@ -1978,7 +1979,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, char table_name[NAME_LEN*2+2]; char* db = (table->db) ? table->db : thd->db; bool fatal_error=0; - strxmov(table_name,db ? db : "",".",table->real_name,NullS); + strxmov(table_name,db ? db : "",".",table->table_name,NullS); thd->open_options|= extra_open_options; table->lock_type= lock_type; @@ -2064,14 +2065,14 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, } /* Close all instances of the table to allow repair to rename files */ - if (lock_type == TL_WRITE && table->table->version) + if (lock_type == TL_WRITE && table->table->s->version) { pthread_mutex_lock(&LOCK_open); const char *old_message=thd->enter_cond(&COND_refresh, &LOCK_open, "Waiting to get writelock"); mysql_lock_abort(thd,table->table); - while (remove_table_from_cache(thd, table->table->table_cache_key, - table->table->real_name) && + while (remove_table_from_cache(thd, table->table->s->db, + table->table->s->table_name) && ! 
thd->killed) { dropping_tables++; @@ -2179,12 +2180,12 @@ send_result_message: break; } if (fatal_error) - table->table->version=0; // Force close of table + table->table->s->version=0; // Force close of table else if (open_for_modify) { pthread_mutex_lock(&LOCK_open); - remove_table_from_cache(thd, table->table->table_cache_key, - table->table->real_name); + remove_table_from_cache(thd, table->table->s->db, + table->table->s->table_name); pthread_mutex_unlock(&LOCK_open); /* May be something modified consequently we have to invalidate cache */ query_cache_invalidate3(thd, table->table, 0); @@ -2362,7 +2363,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE **tmp_table; char src_path[FN_REFLEN], dst_path[FN_REFLEN]; char *db= table->db; - char *table_name= table->real_name; + char *table_name= table->table_name; char *src_db= thd->db; char *src_table= table_ident->table.str; int err; @@ -2384,13 +2385,13 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, bzero((gptr)&src_tables_list, sizeof(src_tables_list)); src_tables_list.db= table_ident->db.str ? table_ident->db.str : thd->db; - src_tables_list.real_name= table_ident->table.str; + src_tables_list.table_name= table_ident->table.str; if (lock_and_wait_for_table_name(thd, &src_tables_list)) goto err; if ((tmp_table= find_temporary_table(thd, src_db, src_table))) - strxmov(src_path, (*tmp_table)->path, reg_ext, NullS); + strxmov(src_path, (*tmp_table)->s->path, reg_ext, NullS); else { strxmov(src_path, mysql_data_home, "/", src_db, "/", src_table, @@ -2668,7 +2669,7 @@ int mysql_create_indexes(THD *thd, TABLE_LIST *table_list, List &keys) create_info.default_table_charset= thd->variables.collation_database; /* Cleanup the fields list. We do not want to create existing fields. 
*/ fields.delete_elements(); - if (real_alter_table(thd, table_list->db, table_list->real_name, + if (real_alter_table(thd, table_list->db, table_list->table_name, &create_info, table_list, table, fields, keys, drop, alter, 0, (ORDER*)0, ALTER_ADD_INDEX, DUP_ERROR)) @@ -2680,7 +2681,7 @@ int mysql_create_indexes(THD *thd, TABLE_LIST *table_list, List &keys) if (table->file->add_index(table, key_info_buffer, key_count)|| (my_snprintf(path, sizeof(path), "%s/%s/%s%s", mysql_data_home, table_list->db, (lower_case_table_names == 2) ? - table_list->alias: table_list->real_name, reg_ext) >= + table_list->alias: table_list->table_name, reg_ext) >= (int) sizeof(path)) || ! unpack_filename(path, path) || mysql_create_frm(thd, path, &create_info, @@ -2765,7 +2766,7 @@ int mysql_drop_indexes(THD *thd, TABLE_LIST *table_list, if ((drop_key)|| (drop.elements<= 0)) { - if (real_alter_table(thd, table_list->db, table_list->real_name, + if (real_alter_table(thd, table_list->db, table_list->table_name, &create_info, table_list, table, fields, keys, drop, alter, 0, (ORDER*)0, ALTER_DROP_INDEX, DUP_ERROR)) @@ -2782,7 +2783,7 @@ int mysql_drop_indexes(THD *thd, TABLE_LIST *table_list, /*select_field_count*/ 0)|| (snprintf(path, sizeof(path), "%s/%s/%s%s", mysql_data_home, table_list->db, (lower_case_table_names == 2)? - table_list->alias: table_list->real_name, reg_ext)>= + table_list->alias: table_list->table_name, reg_ext)>= (int)sizeof(path))|| ! 
unpack_filename(path, path)|| mysql_create_frm(thd, path, &create_info, @@ -2806,7 +2807,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, TABLE_LIST *table_list, List &fields, List &keys, uint order_num, ORDER *order, - enum enum_duplicates handle_duplicates, + enum enum_duplicates handle_duplicates, bool ignore, ALTER_INFO *alter_info, bool do_send_ok) { TABLE *table,*new_table=0; @@ -2822,7 +2823,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, DBUG_ENTER("mysql_alter_table"); thd->proc_info="init"; - table_name=table_list->real_name; + table_name=table_list->table_name; alias= (lower_case_table_names == 2) ? table_list->alias : table_name; db=table_list->db; @@ -2863,7 +2864,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, } else { - if (table->tmp_table) + if (table->s->tmp_table) { if (find_temporary_table(thd,new_db,new_name_buff)) { @@ -2891,9 +2892,9 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, new_name= table_name; } - old_db_type=table->db_type; + old_db_type= table->s->db_type; if (create_info->db_type == DB_TYPE_DEFAULT) - create_info->db_type=old_db_type; + create_info->db_type= old_db_type; if ((new_db_type= ha_checktype(create_info->db_type)) != create_info->db_type) { @@ -2905,11 +2906,11 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, new_name); } if (create_info->row_type == ROW_TYPE_NOT_USED) - create_info->row_type=table->row_type; + create_info->row_type= table->s->row_type; thd->proc_info="setup"; if (!(alter_info->flags & ~(ALTER_RENAME | ALTER_KEYS_ONOFF)) && - !table->tmp_table) // no need to touch frm + !table->s->tmp_table) // no need to touch frm { error=0; if (new_name != table_name || new_db != db) @@ -2959,7 +2960,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, { push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), - table->table_name); + table->alias); error=0; } if (!error) @@ -2987,15 
+2988,15 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, /* Let new create options override the old ones */ if (!(used_fields & HA_CREATE_USED_MIN_ROWS)) - create_info->min_rows=table->min_rows; + create_info->min_rows= table->s->min_rows; if (!(used_fields & HA_CREATE_USED_MAX_ROWS)) - create_info->max_rows=table->max_rows; + create_info->max_rows= table->s->max_rows; if (!(used_fields & HA_CREATE_USED_AVG_ROW_LENGTH)) - create_info->avg_row_length=table->avg_row_length; + create_info->avg_row_length= table->s->avg_row_length; if (!(used_fields & HA_CREATE_USED_DEFAULT_CHARSET)) - create_info->default_table_charset= table->table_charset; + create_info->default_table_charset= table->s->table_charset; - restore_record(table,default_values); // Empty record for DEFAULT + restore_record(table, s->default_values); // Empty record for DEFAULT List_iterator drop_it(alter_info->drop_list); List_iterator def_it(fields); List_iterator alter_it(alter_info->alter_list); @@ -3125,7 +3126,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, List key_parts; KEY *key_info=table->key_info; - for (uint i=0 ; i < table->keys ; i++,key_info++) + for (uint i=0 ; i < table->s->keys ; i++,key_info++) { char *key_name= key_info->name; Alter_drop *drop; @@ -3219,7 +3220,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, goto err; } - db_create_options=table->db_create_options & ~(HA_OPTION_PACK_RECORD); + db_create_options= table->s->db_create_options & ~(HA_OPTION_PACK_RECORD); my_snprintf(tmp_name, sizeof(tmp_name), "%s-%lx_%lx", tmp_file_prefix, current_pid, thd->thread_id); /* Safety fix for innodb */ @@ -3227,7 +3228,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, my_casedn_str(files_charset_info, tmp_name); create_info->db_type=new_db_type; if (!create_info->comment) - create_info->comment=table->comment; + create_info->comment= table->s->comment; table->file->update_create_info(create_info); if 
((create_info->table_options & @@ -3243,7 +3244,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, HA_OPTION_NO_DELAY_KEY_WRITE); create_info->table_options|= db_create_options; - if (table->tmp_table) + if (table->s->tmp_table) create_info->options|=HA_LEX_CREATE_TMP_TABLE; /* @@ -3253,7 +3254,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, */ need_copy_table=(alter_info->flags & ~(ALTER_CHANGE_COLUMN_DEFAULT|ALTER_OPTIONS) || create_info->used_fields & ~(HA_CREATE_USED_COMMENT|HA_CREATE_USED_PASSWORD) || - table->tmp_table); + table->s->tmp_table); create_info->frm_only= !need_copy_table; /* @@ -3314,12 +3315,12 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, } if (need_copy_table) { - if (table->tmp_table) + if (table->s->tmp_table) { TABLE_LIST tbl; bzero((void*) &tbl, sizeof(tbl)); tbl.db= new_db; - tbl.real_name= tbl.alias= tmp_name; + tbl.table_name= tbl.alias= tmp_name; new_table= open_table(thd, &tbl, thd->mem_root, 0); } else @@ -3343,18 +3344,18 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, thd->proc_info="copy to tmp table"; next_insert_id=thd->next_insert_id; // Remember for logging copied=deleted=0; - if (new_table && !new_table->is_view) + if (new_table && !new_table->s->is_view) { new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; new_table->next_number_field=new_table->found_next_number_field; error=copy_data_between_tables(table,new_table,create_list, - handle_duplicates, + handle_duplicates, ignore, order_num, order, &copied, &deleted); } thd->last_insert_id=next_insert_id; // Needed for correct log thd->count_cuted_fields= CHECK_FIELD_IGNORE; - if (table->tmp_table) + if (table->s->tmp_table) { /* We changed a temporary table */ if (error) @@ -3373,7 +3374,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, thd->lock=0; } /* Remove link to old table and rename the new one */ - close_temporary_table(thd,table->table_cache_key,table_name); + 
close_temporary_table(thd, table->s->db, table_name); if (rename_temporary_table(thd, new_table, new_db, new_alias)) { // Fatal error close_temporary_table(thd,new_db,tmp_name); @@ -3567,6 +3568,7 @@ static int copy_data_between_tables(TABLE *from,TABLE *to, List &create, enum enum_duplicates handle_duplicates, + bool ignore, uint order_num, ORDER *order, ha_rows *copied, ha_rows *deleted) @@ -3586,7 +3588,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, ulong save_sql_mode; DBUG_ENTER("copy_data_between_tables"); - if (!(copy= new Copy_field[to->fields])) + if (!(copy= new Copy_field[to->s->fields])) DBUG_RETURN(-1); /* purecov: inspected */ if (to->file->external_lock(thd, F_WRLCK)) @@ -3628,9 +3630,9 @@ copy_data_between_tables(TABLE *from,TABLE *to, from->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), MYF(MY_FAE | MY_ZEROFILL)); bzero((char*) &tables,sizeof(tables)); - tables.table = from; - tables.alias = tables.real_name= from->real_name; - tables.db = from->table_cache_key; + tables.table= from; + tables.alias= tables.table_name= (char*) from->s->table_name; + tables.db= (char*) from->s->db; error=1; if (thd->lex->select_lex.setup_ref_array(thd, order_num) || @@ -3660,10 +3662,11 @@ copy_data_between_tables(TABLE *from,TABLE *to, current query id */ from->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1); - if (handle_duplicates == DUP_IGNORE || + if (ignore || handle_duplicates == DUP_REPLACE) to->file->extra(HA_EXTRA_IGNORE_DUP_KEY); thd->row_count= 0; + restore_record(to, s->default_values); // Create empty record while (!(error=info.read_record(&info))) { if (thd->killed) @@ -3686,7 +3689,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, } if ((error=to->file->write_row((byte*) to->record[0]))) { - if ((handle_duplicates != DUP_IGNORE && + if ((!ignore && handle_duplicates != DUP_REPLACE) || (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)) @@ -3757,14 +3760,14 @@ bool 
mysql_recreate_table(THD *thd, TABLE_LIST *table_list, lex->alter_info.reset(); bzero((char*) &create_info,sizeof(create_info)); create_info.db_type=DB_TYPE_DEFAULT; - create_info.row_type=ROW_TYPE_DEFAULT; + create_info.row_type=ROW_TYPE_NOT_USED; create_info.default_table_charset=default_charset_info; /* Force alter table to recreate table */ lex->alter_info.flags= ALTER_CHANGE_COLUMN; DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info, table_list, lex->create_list, lex->key_list, 0, (ORDER *) 0, - DUP_ERROR, &lex->alter_info, do_send_ok)); + DUP_ERROR, 0, &lex->alter_info, do_send_ok)); } @@ -3789,7 +3792,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) char table_name[NAME_LEN*2+2]; TABLE *t; - strxmov(table_name, table->db ,".", table->real_name, NullS); + strxmov(table_name, table->db ,".", table->table_name, NullS); t= table->table= open_ltable(thd, table, TL_READ_NO_INSERT); thd->clear_error(); // these errors shouldn't get client @@ -3834,7 +3837,7 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) row_crc= my_checksum(row_crc, t->record[0], ((byte*) t->field[0]->ptr) - t->record[0]); - for (uint i= 0; i < t->fields; i++ ) + for (uint i= 0; i < t->s->fields; i++ ) { Field *f= t->field[i]; if (f->type() == FIELD_TYPE_BLOB) diff --git a/sql/sql_test.cc b/sql/sql_test.cc index 6cffa9df2c6..2739fe865f7 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -79,7 +79,7 @@ void print_cached_tables(void) { TABLE *entry=(TABLE*) hash_element(&open_cache,idx); printf("%-14.14s %-32s%6ld%8ld%10ld%6d %s\n", - entry->table_cache_key,entry->real_name,entry->version, + entry->s->db, entry->s->table_name, entry->s->version, entry->in_use ? entry->in_use->thread_id : 0L, entry->in_use ? entry->in_use->dbug_thread_id : 0L, entry->db_stat ? 1 : 0, entry->in_use ? 
lock_descriptions[(int)entry->reginfo.lock_type] : "Not in use"); @@ -131,7 +131,7 @@ void TEST_filesort(SORT_FIELD *sortorder,uint s_length) { if (sortorder->field->table_name) { - out.append(sortorder->field->table_name); + out.append(*sortorder->field->table_name); out.append('.'); } out.append(sortorder->field->field_name ? sortorder->field->field_name: @@ -167,7 +167,7 @@ TEST_join(JOIN *join) TABLE *form=tab->table; char key_map_buff[128]; fprintf(DBUG_FILE,"%-16.16s type: %-7s q_keys: %s refs: %d key: %d len: %d\n", - form->table_name, + form->alias, join_type_str[tab->type], tab->keys.print(key_map_buff), tab->ref.key_parts, @@ -261,7 +261,7 @@ print_plan(JOIN* join, double read_time, double record_count, pos = join->positions[i]; table= pos.table->table; if (table) - fputs(table->real_name, DBUG_FILE); + fputs(table->s->table_name, DBUG_FILE); fputc(' ', DBUG_FILE); } fputc('\n', DBUG_FILE); @@ -278,7 +278,7 @@ print_plan(JOIN* join, double read_time, double record_count, pos= join->best_positions[i]; table= pos.table->table; if (table) - fputs(table->real_name, DBUG_FILE); + fputs(table->s->table_name, DBUG_FILE); fputc(' ', DBUG_FILE); } } @@ -289,7 +289,7 @@ print_plan(JOIN* join, double read_time, double record_count, for (plan_nodes= join->best_ref ; *plan_nodes ; plan_nodes++) { join_table= (*plan_nodes); - fputs(join_table->table->real_name, DBUG_FILE); + fputs(join_table->table->s->table_name, DBUG_FILE); fprintf(DBUG_FILE, "(%lu,%lu,%lu)", (ulong) join_table->found_records, (ulong) join_table->records, @@ -332,12 +332,12 @@ static void push_locks_into_array(DYNAMIC_ARRAY *ar, THR_LOCK_DATA *data, if (data) { TABLE *table=(TABLE *)data->debug_print_param; - if (table && table->tmp_table == NO_TMP_TABLE) + if (table && table->s->tmp_table == NO_TMP_TABLE) { TABLE_LOCK_INFO table_lock_info; - table_lock_info.thread_id=table->in_use->thread_id; - memcpy(table_lock_info.table_name, table->table_cache_key, - table->key_length); + 
table_lock_info.thread_id= table->in_use->thread_id; + memcpy(table_lock_info.table_name, table->s->table_cache_key, + table->s->key_length); table_lock_info.table_name[strlen(table_lock_info.table_name)]='.'; table_lock_info.waiting=wait; table_lock_info.lock_text=text; diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index 96ede3fbe6b..5968d13efde 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -70,7 +70,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) Table_triggers_list::create_trigger() because we want to avoid messing with table cash for views and temporary tables. */ - if (tables->view || table->tmp_table != NO_TMP_TABLE) + if (tables->view || table->s->tmp_table != NO_TMP_TABLE) { my_error(ER_TRG_ON_VIEW_OR_TEMP_TABLE, MYF(0), tables->alias); DBUG_RETURN(TRUE); @@ -188,7 +188,7 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables) strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", tables->db, "/", NullS); dir.length= unpack_filename(dir_buff, dir_buff); dir.str= dir_buff; - file.length= strxnmov(file_buff, FN_REFLEN, tables->real_name, + file.length= strxnmov(file_buff, FN_REFLEN, tables->table_name, triggers_file_ext, NullS) - file_buff; file.str= file_buff; @@ -257,7 +257,7 @@ bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables) elsewhere). 
*/ strxnmov(path, FN_REFLEN, mysql_data_home, "/", tables->db, "/", - tables->real_name, triggers_file_ext, NullS); + tables->table_name, triggers_file_ext, NullS); unpack_filename(path, path); return my_delete(path, MYF(MY_WME)); } @@ -270,7 +270,7 @@ bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables) "/", NullS); dir.length= unpack_filename(dir_buff, dir_buff); dir.str= dir_buff; - file.length= strxnmov(file_buff, FN_REFLEN, tables->real_name, + file.length= strxnmov(file_buff, FN_REFLEN, tables->table_name, triggers_file_ext, NullS) - file_buff; file.str= file_buff; @@ -315,7 +315,7 @@ bool Table_triggers_list::prepare_old_row_accessors(TABLE *table) Field **fld, **old_fld; if (!(old_field= (Field **)alloc_root(&table->mem_root, - (table->fields + 1) * + (table->s->fields + 1) * sizeof(Field*)))) return 1; diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index 7fd81f22e66..ec47fb055e2 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -146,7 +146,7 @@ void udf_init() new_thd->db_length=5; bzero((gptr) &tables,sizeof(tables)); - tables.alias= tables.real_name= (char*) "func"; + tables.alias= tables.table_name= (char*) "func"; tables.lock_type = TL_READ; tables.db=new_thd->db; @@ -168,7 +168,7 @@ void udf_init() char *dl_name= get_field(&mem, table->field[2]); bool new_dl=0; Item_udftype udftype=UDFTYPE_FUNCTION; - if (table->fields >= 4) // New func table + if (table->s->fields >= 4) // New func table udftype=(Item_udftype) table->field[3]->val_int(); if (!(tmp = add_udf(&name,(Item_result) table->field[1]->val_int(), @@ -429,16 +429,16 @@ int mysql_create_function(THD *thd,udf_func *udf) bzero((char*) &tables,sizeof(tables)); tables.db= (char*) "mysql"; - tables.real_name= tables.alias= (char*) "func"; + tables.table_name= tables.alias= (char*) "func"; /* Allow creation of functions even if we can't open func table */ if (!(table = open_ltable(thd,&tables,TL_WRITE))) goto err; - restore_record(table,default_values); // Default values for fields + 
restore_record(table, s->default_values); // Default values for fields table->field[0]->store(u_d->name.str, u_d->name.length, system_charset_info); table->field[1]->store((longlong) u_d->returns); table->field[2]->store(u_d->dl,(uint) strlen(u_d->dl), system_charset_info); - if (table->fields >= 4) // If not old func format + if (table->s->fields >= 4) // If not old func format table->field[3]->store((longlong) u_d->type); error = table->file->write_row(table->record[0]); @@ -488,7 +488,7 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) bzero((char*) &tables,sizeof(tables)); tables.db=(char*) "mysql"; - tables.real_name= tables.alias= (char*) "func"; + tables.table_name= tables.alias= (char*) "func"; if (!(table = open_ltable(thd,&tables,TL_WRITE))) goto err; if (!table->file->index_read_idx(table->record[0],0,(byte*) udf_name->str, diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 012a26a6f4d..ce84be47243 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -51,10 +51,10 @@ select_union::select_union(TABLE *table_par) { bzero((char*) &info,sizeof(info)); /* - We can always use DUP_IGNORE because the temporary table will only + We can always use IGNORE because the temporary table will only contain a unique key if we are using not using UNION ALL */ - info.handle_duplicates= DUP_IGNORE; + info.ignore= 1; } select_union::~select_union() @@ -313,7 +313,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); bzero((char*) &result_table_list, sizeof(result_table_list)); result_table_list.db= (char*) ""; - result_table_list.real_name= result_table_list.alias= (char*) "union"; + result_table_list.table_name= result_table_list.alias= (char*) "union"; result_table_list.table= table; union_result->set_table(table); diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 4d30ffbda9b..c35df041a39 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -31,18 +31,18 @@ static bool 
safe_update_on_fly(JOIN_TAB *join_tab, List *fields); static bool compare_record(TABLE *table, ulong query_id) { - if (!table->blob_fields) + if (!table->s->blob_fields) return cmp_record(table,record[1]); /* Compare null bits */ if (memcmp(table->null_flags, - table->null_flags+table->rec_buff_length, - table->null_bytes)) + table->null_flags+table->s->rec_buff_length, + table->s->null_bytes)) return TRUE; // Diff in NULL value /* Compare updated fields */ for (Field **ptr=table->field ; *ptr ; ptr++) { if ((*ptr)->query_id == query_id && - (*ptr)->cmp_binary_offset(table->rec_buff_length)) + (*ptr)->cmp_binary_offset(table->s->rec_buff_length)) return TRUE; } return FALSE; @@ -113,12 +113,11 @@ int mysql_update(THD *thd, COND *conds, uint order_num, ORDER *order, ha_rows limit, - enum enum_duplicates handle_duplicates) + enum enum_duplicates handle_duplicates, bool ignore) { bool using_limit= limit != HA_POS_ERROR; bool safe_update= thd->options & OPTION_SAFE_UPDATES; bool used_key_is_modified, transactional_table, log_delayed; - bool ignore_err= (thd->lex->duplicates == DUP_IGNORE); int res; int error=0; uint used_index; @@ -167,14 +166,12 @@ int mysql_update(THD *thd, table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); /* Calculate "table->used_keys" based on the WHERE */ - table->used_keys=table->keys_in_use; + table->used_keys= table->s->keys_in_use; table->quick_keys.clear_all(); #ifndef NO_EMBEDDED_ACCESS_CHECKS - /* In case of view TABLE_LIST contain right privilages request */ - want_privilege= (table_list->view ? 
- table_list->grant.want_privilege : - table->grant.want_privilege); + /* TABLE_LIST contain right privilages request */ + want_privilege= table_list->grant.want_privilege; #endif if (mysql_prepare_update(thd, table_list, &conds, order_num, order)) DBUG_RETURN(1); @@ -222,7 +219,7 @@ int mysql_update(THD *thd, #ifndef NO_EMBEDDED_ACCESS_CHECKS /* Check values */ table_list->grant.want_privilege= table->grant.want_privilege= - (SELECT_ACL & ~table->grant.privilege); + (SELECT_ACL & ~~table->grant.privilege); #endif if (setup_fields(thd, 0, table_list, values, 1, 0, 0)) { @@ -380,7 +377,7 @@ int mysql_update(THD *thd, } } - if (handle_duplicates == DUP_IGNORE) + if (ignore) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); if (select && select->quick && select->quick->reset()) @@ -395,7 +392,7 @@ int mysql_update(THD *thd, transactional_table= table->file->has_transactions(); thd->no_trans_update= 0; - thd->abort_on_warning= test(handle_duplicates != DUP_IGNORE && + thd->abort_on_warning= test(!ignore && (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))); @@ -415,7 +412,7 @@ int mysql_update(THD *thd, if (compare_record(table, query_id)) { - if ((res= table_list->view_check_option(thd, ignore_err)) != + if ((res= table_list->view_check_option(thd, ignore)) != VIEW_CHECK_OK) { found--; @@ -433,8 +430,7 @@ int mysql_update(THD *thd, updated++; thd->no_trans_update= !transactional_table; } - else if (handle_duplicates != DUP_IGNORE || - error != HA_ERR_FOUND_DUPP_KEY) + else if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) { thd->fatal_error(); // Force error message table->file->print_error(error,MYF(0)); @@ -473,7 +469,7 @@ int mysql_update(THD *thd, query_cache_invalidate3(thd, table_list, 1); } - log_delayed= (transactional_table || table->tmp_table); + log_delayed= (transactional_table || table->s->tmp_table); if ((updated || (error < 0)) && (error <= 0 || !transactional_table)) { if (mysql_bin_log.is_open()) @@ -574,7 +570,7 @@ bool 
mysql_prepare_update(THD *thd, TABLE_LIST *table_list, /* Check that we are not using table that we are updating in a sub select */ if (unique_table(table_list, table_list->next_global)) { - my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); + my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name); DBUG_RETURN(TRUE); } select_lex->fix_prepare_information(thd, conds); @@ -700,9 +696,9 @@ bool mysql_multi_update_prepare(THD *thd) single SELECT on top and have to check underlying SELECTs of it */ if (lex->select_lex.check_updateable_in_subqueries(tl->db, - tl->real_name)) + tl->table_name)) { - my_error(ER_UPDATE_TABLE_USED, MYF(0), tl->real_name); + my_error(ER_UPDATE_TABLE_USED, MYF(0), tl->table_name); DBUG_RETURN(TRUE); } DBUG_PRINT("info",("setting table `%s` for update", tl->alias)); @@ -717,7 +713,7 @@ bool mysql_multi_update_prepare(THD *thd) } /* Check access privileges for table */ - if (!tl->derived) + if (!tl->derived && !tl->belong_to_view) { uint want_privilege= tl->updating ? 
UPDATE_ACL : SELECT_ACL; if (!using_lock_tables) @@ -812,7 +808,7 @@ bool mysql_multi_update(THD *thd, List *values, COND *conds, ulong options, - enum enum_duplicates handle_duplicates, + enum enum_duplicates handle_duplicates, bool ignore, SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex) { bool res= FALSE; @@ -825,7 +821,7 @@ bool mysql_multi_update(THD *thd, if (!(result= new multi_update(thd, table_list, thd->lex->select_lex.leaf_tables, fields, values, - handle_duplicates))) + handle_duplicates, ignore))) DBUG_RETURN(TRUE); thd->no_trans_update= 0; @@ -851,12 +847,13 @@ bool mysql_multi_update(THD *thd, multi_update::multi_update(THD *thd_arg, TABLE_LIST *table_list, TABLE_LIST *leaves_list, List *field_list, List *value_list, - enum enum_duplicates handle_duplicates_arg) + enum enum_duplicates handle_duplicates_arg, + bool ignore_arg) :all_tables(table_list), leaves(leaves_list), update_tables(0), thd(thd_arg), tmp_tables(0), updated(0), found(0), fields(field_list), values(value_list), table_count(0), copy_field(0), handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(0), - transactional_tables(1) + transactional_tables(1), ignore(ignore_arg) {} @@ -925,7 +922,7 @@ int multi_update::prepare(List ¬_used_values, table_count= update.elements; update_tables= (TABLE_LIST*) update.first; - tmp_tables = (TABLE **) thd->calloc(sizeof(TABLE *) * table_count); + tmp_tables = (TABLE**) thd->calloc(sizeof(TABLE *) * table_count); tmp_table_param = (TMP_TABLE_PARAM*) thd->calloc(sizeof(TMP_TABLE_PARAM) * table_count); fields_for_table= (List_item **) thd->alloc(sizeof(List_item *) * @@ -977,7 +974,7 @@ int multi_update::prepare(List ¬_used_values, TABLE *table=table_ref->table; if (!(tables_to_update & table->map) && find_table_in_local_list(update_tables, table_ref->db, - table_ref->real_name)) + table_ref->table_name)) table->no_cache= 1; // Disable row cache } DBUG_RETURN(thd->is_fatal_error != 0); @@ -1003,7 +1000,7 @@ multi_update::initialize_tables(JOIN 
*join) DBUG_RETURN(1); main_table=join->join_tab->table; trans_safe= transactional_tables= main_table->file->has_transactions(); - log_delayed= trans_safe || main_table->tmp_table != NO_TMP_TABLE; + log_delayed= trans_safe || main_table->s->tmp_table != NO_TMP_TABLE; table_to_update= 0; /* Create a temporary table for keys to all tables, except main table */ @@ -1106,8 +1103,8 @@ static bool safe_update_on_fly(JOIN_TAB *join_tab, List *fields) return !join_tab->quick->check_if_keys_used(fields); /* If scanning in clustered key */ if ((table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) && - table->primary_key < MAX_KEY) - return !check_if_key_used(table, table->primary_key, *fields); + table->s->primary_key < MAX_KEY) + return !check_if_key_used(table, table->s->primary_key, *fields); return TRUE; default: break; // Avoid compler warning @@ -1144,7 +1141,6 @@ multi_update::~multi_update() bool multi_update::send_data(List ¬_used_values) { TABLE_LIST *cur_table; - bool ignore_err= (thd->lex->duplicates == DUP_IGNORE); DBUG_ENTER("multi_update::send_data"); for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local) @@ -1179,7 +1175,7 @@ bool multi_update::send_data(List ¬_used_values) if (compare_record(table, thd->query_id)) { int error; - if ((error= cur_table->view_check_option(thd, ignore_err)) != + if ((error= cur_table->view_check_option(thd, ignore)) != VIEW_CHECK_OK) { found--; @@ -1201,8 +1197,7 @@ bool multi_update::send_data(List ¬_used_values) table->record[0]))) { updated--; - if (handle_duplicates != DUP_IGNORE || - error != HA_ERR_FOUND_DUPP_KEY) + if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) { thd->fatal_error(); // Force error message table->file->print_error(error,MYF(0)); @@ -1336,19 +1331,18 @@ int multi_update::do_updates(bool from_send_error) if ((local_error=table->file->update_row(table->record[1], table->record[0]))) { - if (local_error != HA_ERR_FOUND_DUPP_KEY || - handle_duplicates != DUP_IGNORE) + if (!ignore || 
local_error != HA_ERR_FOUND_DUPP_KEY) goto err; } updated++; - if (table->tmp_table != NO_TMP_TABLE) + if (table->s->tmp_table != NO_TMP_TABLE) log_delayed= 1; } } if (updated != org_updated) { - if (table->tmp_table != NO_TMP_TABLE) + if (table->s->tmp_table != NO_TMP_TABLE) log_delayed= 1; // Tmp tables forces delay log if (table->file->has_transactions()) log_delayed= transactional_tables= 1; @@ -1372,7 +1366,7 @@ err: if (updated != org_updated) { - if (table->tmp_table != NO_TMP_TABLE) + if (table->s->tmp_table != NO_TMP_TABLE) log_delayed= 1; if (table->file->has_transactions()) log_delayed= transactional_tables= 1; diff --git a/sql/sql_view.cc b/sql/sql_view.cc index bafb57c44b0..533876b6718 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -118,7 +118,7 @@ bool mysql_create_view(THD *thd, if (check_some_access(thd, VIEW_ANY_ACL, tbl)) { my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0), - "ANY", thd->priv_user, thd->host_or_ip, tbl->real_name); + "ANY", thd->priv_user, thd->host_or_ip, tbl->table_name); DBUG_RETURN(TRUE); } /* @@ -136,11 +136,11 @@ bool mysql_create_view(THD *thd, /* Make sure that all rights are loaded to the TABLE::grant field. - tbl->real_name will be correct name of table because VIEWs are + tbl->table_name will be correct name of table because VIEWs are not opened yet. */ fill_effective_table_privileges(thd, &tbl->grant, tbl->db, - tbl->real_name); + tbl->table_name); } } @@ -187,7 +187,7 @@ bool mysql_create_view(THD *thd, for (tbl= tables; tbl; tbl= tbl->next_global) { /* is this table temporary and is not view? */ - if (tbl->table->tmp_table != NO_TMP_TABLE && !tbl->view && + if (tbl->table->s->tmp_table != NO_TMP_TABLE && !tbl->view && !tbl->schema_table) { my_error(ER_VIEW_SELECT_TMPTABLE, MYF(0), tbl->alias); @@ -198,7 +198,7 @@ bool mysql_create_view(THD *thd, /* is this table view and the same view which we creates now? 
*/ if (tbl->view && strcmp(tbl->view_db.str, view->db) == 0 && - strcmp(tbl->view_name.str, view->real_name) == 0) + strcmp(tbl->view_name.str, view->table_name) == 0) { my_error(ER_NO_SUCH_TABLE, MYF(0), tbl->view_db.str, tbl->view_name.str); res= TRUE; @@ -272,24 +272,24 @@ bool mysql_create_view(THD *thd, List_iterator_fast it(sl->item_list); Item *item; fill_effective_table_privileges(thd, &view->grant, db, - view->real_name); + view->table_name); while ((item= it++)) { Item_field *fld; uint priv= (get_column_grant(thd, &view->grant, db, - view->real_name, item->name) & + view->table_name, item->name) & VIEW_ANY_ACL); if ((fld= item->filed_for_view_update())) { /* Do we have more privileges on view field then underlying table field? */ - if (!fld->field->table->tmp_table && (~fld->have_privileges & priv)) + if (!fld->field->table->s->tmp_table && (~fld->have_privileges & priv)) { /* VIEW column has more privileges */ my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), "create view", thd->priv_user, thd->host_or_ip, item->name, - view->real_name); + view->table_name); DBUG_RETURN(TRUE); } } @@ -404,7 +404,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, dir.length= strlen(dir_buff); file.str= file_buff; - file.length= (strxnmov(file_buff, FN_REFLEN, view->real_name, reg_ext, + file.length= (strxnmov(file_buff, FN_REFLEN, view->table_name, reg_ext, NullS) - file_buff); /* init timestamp */ if (!view->timestamp.str) @@ -435,7 +435,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, strncmp("VIEW", parser->type()->str, parser->type()->length)) { my_error(ER_WRONG_OBJECT, MYF(0), - (view->db ? view->db : thd->db), view->real_name, "VIEW"); + (view->db ? 
view->db : thd->db), view->table_name, "VIEW"); DBUG_RETURN(-1); } @@ -518,7 +518,7 @@ loop_out: !((TABLE_LIST*)lex->select_lex.table_list.first)->next_local && find_table_in_global_list(lex->query_tables->next_global, lex->query_tables->db, - lex->query_tables->real_name)) + lex->query_tables->table_name)) { view->updatable_view= 0; } @@ -526,7 +526,7 @@ loop_out: if (view->with_check != VIEW_CHECK_NONE && !view->updatable_view) { - my_error(ER_VIEW_NONUPD_CHECK, MYF(0), view->db, view->real_name); + my_error(ER_VIEW_NONUPD_CHECK, MYF(0), view->db, view->table_name); DBUG_RETURN(-1); } @@ -598,8 +598,8 @@ mysql_make_view(File_parser *parser, TABLE_LIST *table) */ table->view_db.str= table->db; table->view_db.length= table->db_length; - table->view_name.str= table->real_name; - table->view_name.length= table->real_name_length; + table->view_name.str= table->table_name; + table->view_name.length= table->table_name_length; /*TODO: md5 test here and warning if it is differ */ @@ -669,8 +669,8 @@ mysql_make_view(File_parser *parser, TABLE_LIST *table) TABLE_LIST *table= old_lex->proc_table; table->db= (char*)"mysql"; table->db_length= 5; - table->real_name= table->alias= (char*)"proc"; - table->real_name_length= 4; + table->table_name= table->alias= (char*)"proc"; + table->table_name_length= 4; table->cacheable_table= 1; old_lex->add_to_query_tables(table); } @@ -679,18 +679,6 @@ mysql_make_view(File_parser *parser, TABLE_LIST *table) if (lex->spfuns.array.buffer) hash_free(&lex->spfuns); - /* - mark to avoid temporary table using and put view reference and find - last view table - */ - for (tbl= view_tables; - tbl; - tbl= (view_tables_tail= tbl)->next_global) - { - tbl->skip_temporary= 1; - tbl->belong_to_view= top_view; - } - /* check rights to run commands (EXPLAIN SELECT & SHOW CREATE) which show underlying tables @@ -710,6 +698,18 @@ mysql_make_view(File_parser *parser, TABLE_LIST *table) goto err; } + /* + mark to avoid temporary table using and put view 
reference and find + last view table + */ + for (tbl= view_tables; + tbl; + tbl= (view_tables_tail= tbl)->next_global) + { + tbl->skip_temporary= 1; + tbl->belong_to_view= top_view; + } + /* move SQL_NO_CACHE & Co to whole query */ old_lex->safe_to_cache_query= (old_lex->safe_to_cache_query && lex->safe_to_cache_query); @@ -889,13 +889,13 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) for (view= views; view; view= view->next_local) { strxnmov(path, FN_REFLEN, mysql_data_home, "/", view->db, "/", - view->real_name, reg_ext, NullS); + view->table_name, reg_ext, NullS); (void) unpack_filename(path, path); VOID(pthread_mutex_lock(&LOCK_open)); if (access(path, F_OK) || (type= (mysql_frm_type(path) != FRMTYPE_VIEW))) { char name[FN_REFLEN]; - my_snprintf(name, sizeof(name), "%s.%s", view->db, view->real_name); + my_snprintf(name, sizeof(name), "%s.%s", view->db, view->table_name); if (thd->lex->drop_if_exists) { push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, @@ -905,7 +905,7 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) continue; } if (type) - my_error(ER_WRONG_OBJECT, MYF(0), view->db, view->real_name, "VIEW"); + my_error(ER_WRONG_OBJECT, MYF(0), view->db, view->table_name, "VIEW"); else my_error(ER_BAD_TABLE_ERROR, MYF(0), name); goto err; @@ -998,7 +998,7 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view) if (view->belong_to_view) view= view->belong_to_view; trans= view->field_translation; - key_info_end= (key_info= table->key_info)+ table->keys; + key_info_end= (key_info= table->key_info)+ table->s->keys; elements_in_view= view->view->select_lex.item_list.elements; DBUG_ASSERT(table != 0 && view->field_translation != 0); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 2095a76d0c7..fa23502ea93 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -3220,8 +3220,9 @@ alter: { THD *thd= YYTHD; LEX *lex= thd->lex; - lex->sql_command = SQLCOM_ALTER_TABLE; - lex->name=0; + lex->sql_command= 
SQLCOM_ALTER_TABLE; + lex->name= 0; + lex->duplicates= DUP_ERROR; if (!lex->select_lex.add_table_to_list(thd, $4, NULL, TL_OPTION_UPDATING)) YYABORT; @@ -3449,8 +3450,9 @@ opt_column: | COLUMN_SYM {}; opt_ignore: - /* empty */ { Lex->duplicates=DUP_ERROR; } - | IGNORE_SYM { Lex->duplicates=DUP_IGNORE; }; + /* empty */ { Lex->ignore= 0;} + | IGNORE_SYM { Lex->ignore= 1;} + ; opt_restrict: /* empty */ { Lex->drop_mode= DROP_DEFAULT; } @@ -5573,7 +5575,9 @@ insert: INSERT { LEX *lex= Lex; - lex->sql_command = SQLCOM_INSERT; + lex->sql_command= SQLCOM_INSERT; + lex->duplicates= DUP_ERROR; + mysql_init_select(lex); /* for subselects */ lex->lock_option= (using_update_log) ? TL_READ_NO_INSERT : TL_READ; lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; @@ -5593,6 +5597,7 @@ replace: LEX *lex=Lex; lex->sql_command = SQLCOM_REPLACE; lex->duplicates= DUP_REPLACE; + mysql_init_select(lex); lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; } replace_lock_option insert2 @@ -5734,6 +5739,7 @@ update: mysql_init_select(lex); lex->sql_command= SQLCOM_UPDATE; lex->lock_option= TL_UNLOCK; /* Will be set later */ + lex->duplicates= DUP_ERROR; } opt_low_priority opt_ignore join_table_list SET update_list @@ -5792,7 +5798,9 @@ delete: { LEX *lex= Lex; lex->sql_command= SQLCOM_DELETE; + mysql_init_select(lex); lex->lock_option= lex->thd->update_lock_default; + lex->ignore= 0; lex->select_lex.init_order(); } opt_delete_options single_multi {} @@ -5849,7 +5857,7 @@ opt_delete_options: opt_delete_option: QUICK { Select->options|= OPTION_QUICK; } | LOW_PRIORITY { Lex->lock_option= TL_WRITE_LOW_PRIORITY; } - | IGNORE_SYM { Lex->duplicates= DUP_IGNORE; }; + | IGNORE_SYM { Lex->ignore= 1; }; truncate: TRUNCATE_SYM opt_table_sym table_name @@ -6357,6 +6365,8 @@ load: LOAD DATA_SYM load_data_lock opt_local INFILE TEXT_STRING_sys lex->sql_command= SQLCOM_LOAD; lex->lock_option= $3; lex->local_file= $4; + lex->duplicates= DUP_ERROR; + lex->ignore= 0; if (!(lex->exchange= new 
sql_exchange($6.str,0))) YYABORT; lex->field_list.empty(); @@ -6394,7 +6404,7 @@ load_data_lock: opt_duplicate: /* empty */ { Lex->duplicates=DUP_ERROR; } | REPLACE { Lex->duplicates=DUP_REPLACE; } - | IGNORE_SYM { Lex->duplicates=DUP_IGNORE; }; + | IGNORE_SYM { Lex->ignore= 1; }; opt_field_term: /* empty */ @@ -7117,6 +7127,7 @@ set: { LEX *lex=Lex; lex->sql_command= SQLCOM_SET_OPTION; + mysql_init_select(lex); lex->option_type=OPT_SESSION; lex->var_list.empty(); lex->one_shot_set= 0; @@ -7183,7 +7194,6 @@ option_value: } else lex->var_list.push_back(new set_var_user(new Item_func_set_user_var($2,$4))); - } | internal_variable_name equal set_expr_or_default { diff --git a/sql/table.cc b/sql/table.cc index e9b89d6b124..ddcb5117338 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -85,27 +85,29 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, uint null_bit_pos, new_frm_ver, field_pack_length; SQL_CRYPT *crypted=0; MEM_ROOT **root_ptr, *old_root; + TABLE_SHARE *share; DBUG_ENTER("openfrm"); DBUG_PRINT("enter",("name: '%s' form: 0x%lx",name,outparam)); - error=1; - disk_buff=NULL; + error= 1; + disk_buff= NULL; root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC); old_root= *root_ptr; + bzero((char*) outparam,sizeof(*outparam)); + outparam->in_use= thd; + outparam->s= share= &outparam->share_not_to_be_used; + if ((file=my_open(fn_format(index_file, name, "", reg_ext, MY_UNPACK_FILENAME), O_RDONLY | O_SHARE, MYF(0))) < 0) - { goto err_w_init; - } + error= 4; if (my_read(file,(byte*) head,64,MYF(MY_NABP))) - { goto err_w_init; - } if (memcmp(head, "TYPE=", 5) == 0) { @@ -116,28 +118,22 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, DBUG_RETURN(5); // caller can't process new .frm - error= 4; goto err_w_init; } - bzero((char*) outparam,sizeof(*outparam)); - outparam->in_use= thd; - outparam->blob_ptr_size=sizeof(char*); - outparam->db_stat = db_stat; + share->blob_ptr_size= sizeof(char*); + outparam->db_stat= 
db_stat; init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0); *root_ptr= &outparam->mem_root; - outparam->real_name=strdup_root(&outparam->mem_root, - name+dirname_length(name)); - *fn_ext(outparam->real_name)='\0'; // Remove extension - outparam->table_name=my_strdup(alias,MYF(MY_WME)); - if (!outparam->real_name || !outparam->table_name) - goto err_end; - - error=4; - if (!(outparam->path= strdup_root(&outparam->mem_root,name))) + share->table_name= strdup_root(&outparam->mem_root, + name+dirname_length(name)); + share->path= strdup_root(&outparam->mem_root, name); + outparam->alias= my_strdup(alias, MYF(MY_WME)); + if (!share->table_name || !share->path || !outparam->alias) goto err_not_open; - *fn_ext(outparam->path)='\0'; // Remove extension + *fn_ext(share->table_name)='\0'; // Remove extension + *fn_ext(share->path)='\0'; // Remove extension if (head[0] != (uchar) 254 || head[1] != 1 || (head[2] != FRM_VER && head[2] != FRM_VER+1 && @@ -152,31 +148,34 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, goto err_not_open; /* purecov: inspected */ *fn_ext(index_file)='\0'; // Remove .frm extension - outparam->frm_version= head[2]; - outparam->db_type=ha_checktype((enum db_type) (uint) *(head+3)); - outparam->db_create_options=db_create_options=uint2korr(head+30); - outparam->db_options_in_use=outparam->db_create_options; - null_field_first=0; + share->frm_version= head[2]; + share->db_type= ha_checktype((enum db_type) (uint) *(head+3)); + share->db_create_options= db_create_options=uint2korr(head+30); + share->db_options_in_use= share->db_create_options; + null_field_first= 0; if (!head[32]) // New frm file in 3.23 { - outparam->avg_row_length=uint4korr(head+34); - outparam->row_type=(row_type) head[40]; - outparam->raid_type= head[41]; - outparam->raid_chunks= head[42]; - outparam->raid_chunksize= uint4korr(head+43); - outparam->table_charset=get_charset((uint) head[38],MYF(0)); - null_field_first=1; + share->avg_row_length= 
uint4korr(head+34); + share-> row_type= (row_type) head[40]; + share->raid_type= head[41]; + share->raid_chunks= head[42]; + share->raid_chunksize= uint4korr(head+43); + share->table_charset= get_charset((uint) head[38],MYF(0)); + null_field_first= 1; } - if (!outparam->table_charset) /* unknown charset in head[38] or pre-3.23 frm */ - outparam->table_charset=default_charset_info; - outparam->db_record_offset=1; + if (!share->table_charset) + { + /* unknown charset in head[38] or pre-3.23 frm */ + share->table_charset= default_charset_info; + } + share->db_record_offset= 1; if (db_create_options & HA_OPTION_LONG_BLOB_PTR) - outparam->blob_ptr_size=portable_sizeof_char_ptr; + share->blob_ptr_size= portable_sizeof_char_ptr; /* Set temporarily a good value for db_low_byte_first */ - outparam->db_low_byte_first=test(outparam->db_type != DB_TYPE_ISAM); + share->db_low_byte_first= test(share->db_type != DB_TYPE_ISAM); error=4; - outparam->max_rows=uint4korr(head+18); - outparam->min_rows=uint4korr(head+22); + share->max_rows= uint4korr(head+18); + share->min_rows= uint4korr(head+22); /* Read keyinformation */ key_info_length= (uint) uint2korr(head+28); @@ -185,17 +184,16 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, goto err_not_open; /* purecov: inspected */ if (disk_buff[0] & 0x80) { - outparam->keys= keys= (disk_buff[1] << 7) | (disk_buff[0] & 0x7f); - outparam->key_parts= key_parts= uint2korr(disk_buff+2); + share->keys= keys= (disk_buff[1] << 7) | (disk_buff[0] & 0x7f); + share->key_parts= key_parts= uint2korr(disk_buff+2); } else { - outparam->keys= keys= disk_buff[0]; - outparam->key_parts= key_parts= disk_buff[1]; + share->keys= keys= disk_buff[0]; + share->key_parts= key_parts= disk_buff[1]; } - outparam->keys_for_keyread.init(0); - outparam->keys_in_use.init(keys); - outparam->read_only_keys.init(keys); + share->keys_for_keyread.init(0); + share->keys_in_use.init(keys); outparam->quick_keys.init(); outparam->used_keys.init(); 
outparam->keys_in_use_for_query.init(); @@ -206,7 +204,6 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, goto err_not_open; /* purecov: inspected */ bzero((char*) keyinfo,n_length); outparam->key_info=keyinfo; - outparam->max_key_length= outparam->total_key_length= 0; key_part= my_reinterpret_cast(KEY_PART_INFO*) (keyinfo+keys); strpos=disk_buff+6; @@ -267,9 +264,9 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, keynames=(char*) key_part; strpos+= (strmov(keynames, (char *) strpos) - keynames)+1; - outparam->reclength = uint2korr((head+16)); + share->reclength = uint2korr((head+16)); if (*(head+26) == 1) - outparam->system=1; /* one-record-database */ + share->system= 1; /* one-record-database */ #ifdef HAVE_CRYPTED_FRM else if (*(head+26) == 2) { @@ -281,45 +278,37 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, #endif /* Allocate handler */ - if (!(outparam->file= get_new_handler(outparam,outparam->db_type))) + if (!(outparam->file= get_new_handler(outparam, share->db_type))) goto err_not_open; error=4; outparam->reginfo.lock_type= TL_UNLOCK; outparam->current_lock=F_UNLCK; - if ((db_stat & HA_OPEN_KEYFILE) || (prgflag & DELAYED_OPEN)) records=2; - else records=1; + if ((db_stat & HA_OPEN_KEYFILE) || (prgflag & DELAYED_OPEN)) + records=2; + else + records=1; if (prgflag & (READ_ALL+EXTRA_RECORD)) records++; /* QQ: TODO, remove the +1 from below */ - rec_buff_length=ALIGN_SIZE(outparam->reclength+1+ - outparam->file->extra_rec_buf_length()); - if (!(outparam->record[0]= (byte*) - (record = (char *) alloc_root(&outparam->mem_root, - rec_buff_length * records)))) + rec_buff_length= ALIGN_SIZE(share->reclength + 1 + + outparam->file->extra_rec_buf_length()); + share->rec_buff_length= rec_buff_length; + if (!(record= (char *) alloc_root(&outparam->mem_root, + rec_buff_length * records))) goto err_not_open; /* purecov: inspected */ - record[outparam->reclength]=0; // For purify and 
->c_ptr() - outparam->rec_buff_length=rec_buff_length; - if (my_pread(file,(byte*) record,(uint) outparam->reclength, + share->default_values= record; + if (my_pread(file,(byte*) record, (uint) share->reclength, (ulong) (uint2korr(head+6)+ ((uint2korr(head+14) == 0xffff ? uint4korr(head+47) : uint2korr(head+14)))), MYF(MY_NABP))) goto err_not_open; /* purecov: inspected */ - /* HACK: table->record[2] is used instead of table->default_values here */ - for (i=0 ; i < records ; i++, record+=rec_buff_length) - { - outparam->record[i]=(byte*) record; - if (i) - memcpy(record,record-rec_buff_length,(uint) outparam->reclength); - } - if (records == 2) - { /* fix for select */ - outparam->default_values=outparam->record[1]; - if (db_stat & HA_READ_ONLY) - outparam->record[1]=outparam->record[0]; /* purecov: inspected */ - } - outparam->insert_values=0; /* for INSERT ... UPDATE */ + outparam->record[0]= record+ rec_buff_length; + if (records > 2) + outparam->record[1]= record+ rec_buff_length*2; + else + outparam->record[1]= outparam->record[0]; // Safety VOID(my_seek(file,pos,MY_SEEK_SET,MYF(0))); if (my_read(file,(byte*) head,288,MYF(MY_NABP))) goto err_not_open; @@ -330,30 +319,29 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, goto err_not_open; // Wrong password } - outparam->fields= uint2korr(head+258); - pos=uint2korr(head+260); /* Length of all screens */ - n_length=uint2korr(head+268); - interval_count=uint2korr(head+270); - interval_parts=uint2korr(head+272); - int_length=uint2korr(head+274); - outparam->null_fields=uint2korr(head+282); - com_length=uint2korr(head+284); - outparam->comment=strdup_root(&outparam->mem_root, - (char*) head+47); + share->fields= uint2korr(head+258); + pos= uint2korr(head+260); /* Length of all screens */ + n_length= uint2korr(head+268); + interval_count= uint2korr(head+270); + interval_parts= uint2korr(head+272); + int_length= uint2korr(head+274); + share->null_fields= uint2korr(head+282); + com_length= 
uint2korr(head+284); + share->comment= strdup_root(&outparam->mem_root, (char*) head+47); - DBUG_PRINT("info",("i_count: %d i_parts: %d index: %d n_length: %d int_length: %d com_length: %d", interval_count,interval_parts, outparam->keys,n_length,int_length, com_length)); + DBUG_PRINT("info",("i_count: %d i_parts: %d index: %d n_length: %d int_length: %d com_length: %d", interval_count,interval_parts, share->keys,n_length,int_length, com_length)); if (!(field_ptr = (Field **) alloc_root(&outparam->mem_root, - (uint) ((outparam->fields+1)*sizeof(Field*)+ + (uint) ((share->fields+1)*sizeof(Field*)+ interval_count*sizeof(TYPELIB)+ - (outparam->fields+interval_parts+ + (share->fields+interval_parts+ keys+3)*sizeof(my_string)+ (n_length+int_length+com_length))))) goto err_not_open; /* purecov: inspected */ outparam->field=field_ptr; - read_length=(uint) (outparam->fields * field_pack_length + + read_length=(uint) (share->fields * field_pack_length + pos+ (uint) (n_length+int_length+com_length)); if (read_string(file,(gptr*) &disk_buff,read_length)) goto err_not_open; /* purecov: inspected */ @@ -365,25 +353,25 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, } strpos= disk_buff+pos; - outparam->intervals= (TYPELIB*) (field_ptr+outparam->fields+1); - int_array= (const char **) (outparam->intervals+interval_count); - names= (char*) (int_array+outparam->fields+interval_parts+keys+3); + share->intervals= (TYPELIB*) (field_ptr+share->fields+1); + int_array= (const char **) (share->intervals+interval_count); + names= (char*) (int_array+share->fields+interval_parts+keys+3); if (!interval_count) - outparam->intervals=0; // For better debugging - memcpy((char*) names, strpos+(outparam->fields*field_pack_length), + share->intervals= 0; // For better debugging + memcpy((char*) names, strpos+(share->fields*field_pack_length), (uint) (n_length+int_length)); - comment_pos=names+(n_length+int_length); + comment_pos= names+(n_length+int_length); 
memcpy(comment_pos, disk_buff+read_length-com_length, com_length); - fix_type_pointers(&int_array,&outparam->fieldnames,1,&names); - fix_type_pointers(&int_array,outparam->intervals,interval_count, + fix_type_pointers(&int_array, &share->fieldnames, 1, &names); + fix_type_pointers(&int_array, share->intervals, interval_count, &names); { /* Set ENUM and SET lengths */ TYPELIB *interval; - for (interval= outparam->intervals; - interval < outparam->intervals + interval_count; + for (interval= share->intervals; + interval < share->intervals + interval_count; interval++) { uint count= (uint) (interval->count + 1) * sizeof(uint); @@ -397,33 +385,33 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, } if (keynames) - fix_type_pointers(&int_array,&outparam->keynames,1,&keynames); + fix_type_pointers(&int_array, &share->keynames, 1, &keynames); VOID(my_close(file,MYF(MY_WME))); file= -1; - record=(char*) outparam->record[0]-1; /* Fieldstart = 1 */ + record= (char*) outparam->record[0]-1; /* Fieldstart = 1 */ if (null_field_first) { outparam->null_flags=null_pos=(uchar*) record+1; null_bit_pos= (db_create_options & HA_OPTION_PACK_RECORD) ? 
0 : 1; - outparam->null_bytes= (outparam->null_fields + null_bit_pos + 7) / 8; + share->null_bytes= (share->null_fields + null_bit_pos + 7) / 8; } else { - outparam->null_bytes=(outparam->null_fields+7)/8; - outparam->null_flags=null_pos= - (uchar*) (record+1+outparam->reclength-outparam->null_bytes); + share->null_bytes= (share->null_fields+7)/8; + outparam->null_flags= null_pos= + (uchar*) (record+1+share->reclength-share->null_bytes); null_bit_pos= 0; } - use_hash= outparam->fields >= MAX_FIELDS_BEFORE_HASH; + use_hash= share->fields >= MAX_FIELDS_BEFORE_HASH; if (use_hash) - use_hash= !hash_init(&outparam->name_hash, + use_hash= !hash_init(&share->name_hash, system_charset_info, - outparam->fields,0,0, + share->fields,0,0, (hash_get_key) get_field_name,0,0); - for (i=0 ; i < outparam->fields; i++, strpos+=field_pack_length, field_ptr++) + for (i=0 ; i < share->fields; i++, strpos+=field_pack_length, field_ptr++) { uint pack_flag, interval_nr, unireg_type, recpos, field_length; enum_field_types field_type; @@ -498,7 +486,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, if (!f_is_blob(pack_flag)) { // 3.23 or 4.0 string - if (!(charset= get_charset_by_csname(outparam->table_charset->csname, + if (!(charset= get_charset_by_csname(share->table_charset->csname, MY_CS_BINSORT, MYF(0)))) charset= &my_charset_bin; } @@ -506,14 +494,14 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, charset= &my_charset_bin; } else - charset= outparam->table_charset; + charset= share->table_charset; bzero((char*) &comment, sizeof(comment)); } if (interval_nr && charset->mbminlen > 1) { /* Unescape UCS2 intervals from HEX notation */ - TYPELIB *interval= outparam->intervals + interval_nr - 1; + TYPELIB *interval= share->intervals + interval_nr - 1; unhex_type2(interval); } @@ -527,9 +515,9 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, geom_type, (Field::utype) MTYP_TYPENR(unireg_type), (interval_nr ? 
- outparam->intervals+interval_nr-1 : + share->intervals+interval_nr-1 : (TYPELIB*) 0), - outparam->fieldnames.type_names[i], + share->fieldnames.type_names[i], outparam); if (!reg_field) // Not supported field type { @@ -555,9 +543,9 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, if (reg_field->unireg_check == Field::NEXT_NUMBER) outparam->found_next_number_field= reg_field; if (outparam->timestamp_field == reg_field) - outparam->timestamp_field_offset=i; + share->timestamp_field_offset= i; if (use_hash) - (void) my_hash_insert(&outparam->name_hash,(byte*) field_ptr); // Will never fail + (void) my_hash_insert(&share->name_hash,(byte*) field_ptr); // never fail } *field_ptr=0; // End marker @@ -565,15 +553,15 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, if (key_parts) { uint primary_key=(uint) (find_type((char*) primary_key_name, - &outparam->keynames, 3) - 1); + &share->keynames, 3) - 1); uint ha_option=outparam->file->table_flags(); keyinfo=outparam->key_info; key_part=keyinfo->key_part; - for (uint key=0 ; key < outparam->keys ; key++,keyinfo++) + for (uint key=0 ; key < share->keys ; key++,keyinfo++) { uint usable_parts=0; - keyinfo->name=(char*) outparam->keynames.type_names[key]; + keyinfo->name=(char*) share->keynames.type_names[key]; /* Fix fulltext keys for old .frm files */ if (outparam->key_info[key].flags & HA_FULLTEXT) outparam->key_info[key].algorithm= HA_KEY_ALG_FULLTEXT; @@ -606,7 +594,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, (uint) key_part->offset, (uint) key_part->length); #ifdef EXTRA_DEBUG - if (key_part->fieldnr > outparam->fields) + if (key_part->fieldnr > share->fields) goto err_not_open; // sanity check #endif if (key_part->fieldnr) @@ -654,8 +642,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, { if (outparam->file->index_flags(key, i, 0) & HA_KEYREAD_ONLY) { - outparam->read_only_keys.clear_bit(key); - 
outparam->keys_for_keyread.set_bit(key); + share->keys_for_keyread.set_bit(key); field->part_of_key.set_bit(key); } if (outparam->file->index_flags(key, i, 1) & HA_READ_ORDER) @@ -673,7 +660,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, the primary key, then we can use any key to find this column */ if (ha_option & HA_PRIMARY_KEY_IN_READ_INDEX) - field->part_of_key= outparam->keys_in_use; + field->part_of_key= share->keys_in_use; } if (field->key_length() != key_part->length) { @@ -701,16 +688,16 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, } keyinfo->usable_key_parts=usable_parts; // Filesort - set_if_bigger(outparam->max_key_length,keyinfo->key_length+ + set_if_bigger(share->max_key_length,keyinfo->key_length+ keyinfo->key_parts); - outparam->total_key_length+= keyinfo->key_length; + share->total_key_length+= keyinfo->key_length; if (keyinfo->flags & HA_NOSAME) - set_if_bigger(outparam->max_unique_length,keyinfo->key_length); + set_if_bigger(share->max_unique_length, keyinfo->key_length); } if (primary_key < MAX_KEY && - (outparam->keys_in_use.is_set(primary_key))) + (share->keys_in_use.is_set(primary_key))) { - outparam->primary_key=primary_key; + share->primary_key= primary_key; /* If we are using an integer as the primary key then allow the user to refer to it as '_rowid' @@ -723,27 +710,25 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, } } else - outparam->primary_key = MAX_KEY; // we do not have a primary key + share->primary_key = MAX_KEY; // we do not have a primary key } else - outparam->primary_key= MAX_KEY; + share->primary_key= MAX_KEY; x_free((gptr) disk_buff); disk_buff=0; if (new_field_pack_flag <= 1) - { /* Old file format with default null */ - uint null_length=(outparam->null_fields+7)/8; - bfill(outparam->null_flags,null_length,255); - bfill(outparam->null_flags+outparam->rec_buff_length,null_length,255); - if (records > 2) - 
bfill(outparam->null_flags+outparam->rec_buff_length*2,null_length,255); + { + /* Old file format with default as not null */ + uint null_length= (share->null_fields+7)/8; + bfill(share->default_values + (outparam->null_flags - (uchar*) record), + null_length, 255); } - if ((reg_field=outparam->found_next_number_field)) { - if ((int) (outparam->next_number_index= (uint) + if ((int) (share->next_number_index= (uint) find_ref_key(outparam,reg_field, - &outparam->next_number_key_offset)) < 0) + &share->next_number_key_offset)) < 0) { reg_field->unireg_check=Field::NONE; /* purecov: inspected */ outparam->found_next_number_field=0; @@ -752,26 +737,22 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, reg_field->flags|=AUTO_INCREMENT_FLAG; } - if (outparam->blob_fields) + if (share->blob_fields) { Field **ptr; - Field_blob **save; + uint i, *save; - if (!(outparam->blob_field=save= - (Field_blob**) alloc_root(&outparam->mem_root, - (uint) (outparam->blob_fields+1)* - sizeof(Field_blob*)))) + /* Store offsets to blob fields to find them fast */ + if (!(share->blob_field= save= + (uint*) alloc_root(&outparam->mem_root, + (uint) (share->blob_fields* sizeof(uint))))) goto err_not_open; - for (ptr=outparam->field ; *ptr ; ptr++) + for (i=0, ptr= outparam->field ; *ptr ; ptr++, i++) { if ((*ptr)->flags & BLOB_FLAG) - (*save++)= (Field_blob*) *ptr; + (*save++)= i; } - *save=0; // End marker } - else - outparam->blob_field= - (Field_blob**) (outparam->field+outparam->fields); // Point at null ptr /* The table struct is now initialized; Open the table */ error=2; @@ -791,11 +772,11 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, HA_OPEN_IGNORE_IF_LOCKED) | ha_open_flags)))) { /* Set a flag if the table is crashed and it can be auto. 
repaired */ - outparam->crashed=((err == HA_ERR_CRASHED_ON_USAGE) && - outparam->file->auto_repair() && - !(ha_open_flags & HA_OPEN_FOR_REPAIR)); + share->crashed= ((err == HA_ERR_CRASHED_ON_USAGE) && + outparam->file->auto_repair() && + !(ha_open_flags & HA_OPEN_FOR_REPAIR)); - if (err==HA_ERR_NO_SUCH_TABLE) + if (err == HA_ERR_NO_SUCH_TABLE) { /* The table did not exists in storage engine, use same error message as if the .frm file didn't exist */ @@ -810,27 +791,29 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, goto err_not_open; /* purecov: inspected */ } } - outparam->db_low_byte_first=outparam->file->low_byte_first(); + share->db_low_byte_first= outparam->file->low_byte_first(); *root_ptr= old_root; thd->status_var.opened_tables++; #ifndef DBUG_OFF if (use_hash) - (void) hash_check(&outparam->name_hash); + (void) hash_check(&share->name_hash); #endif DBUG_RETURN (0); err_w_init: - /* Avoid problem with uninitialized data */ + /* + Avoid problem with uninitialized data + Note that we don't have to initialize outparam->s here becasue + the caller will check if the pointer exists in case of errors + */ bzero((char*) outparam,sizeof(*outparam)); - outparam->real_name= (char*)name+dirname_length(name); err_not_open: x_free((gptr) disk_buff); if (file > 0) VOID(my_close(file,MYF(MY_WME))); - err_end: /* Here when no file */ delete crypted; *root_ptr= old_root; if (! 
error_reported) @@ -838,9 +821,9 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, delete outparam->file; outparam->file=0; // For easier errorchecking outparam->db_stat=0; - hash_free(&outparam->name_hash); - free_root(&outparam->mem_root,MYF(0)); - my_free(outparam->table_name,MYF(MY_ALLOW_ZERO_PTR)); + hash_free(&share->name_hash); + free_root(&share->mem_root, MYF(0)); + my_free((char*) outparam->alias, MYF(MY_ALLOW_ZERO_PTR)); DBUG_RETURN (error); } /* openfrm */ @@ -853,21 +836,18 @@ int closefrm(register TABLE *table) DBUG_ENTER("closefrm"); if (table->db_stat) error=table->file->close(); - if (table->table_name) - { - my_free(table->table_name,MYF(0)); - table->table_name=0; - } - if (table->fields) + my_free((char*) table->alias, MYF(MY_ALLOW_ZERO_PTR)); + table->alias= 0; + if (table->field) { for (Field **ptr=table->field ; *ptr ; ptr++) delete *ptr; - table->fields=0; + table->field= 0; } delete table->file; - table->file=0; /* For easier errorchecking */ - hash_free(&table->name_hash); - free_root(&table->mem_root,MYF(0)); + table->file= 0; /* For easier errorchecking */ + hash_free(&table->s->name_hash); + free_root(&table->mem_root, MYF(0)); DBUG_RETURN(error); } @@ -876,8 +856,11 @@ int closefrm(register TABLE *table) void free_blobs(register TABLE *table) { - for (Field_blob **ptr=table->blob_field ; *ptr ; ptr++) - (*ptr)->free(); + uint *ptr, *end; + for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ; + ptr != end ; + ptr++) + ((Field_blob*) table->field[*ptr])->free(); } @@ -1025,6 +1008,7 @@ static void frm_error(int error, TABLE *form, const char *name, int err_no; char buff[FN_REFLEN]; const char *form_dev="",*datext; + const char *real_name= (char*) name+dirname_length(name); DBUG_ENTER("frm_error"); switch (error) { @@ -1035,7 +1019,7 @@ static void frm_error(int error, TABLE *form, const char *name, uint length=dirname_part(buff,name); buff[length-1]=0; db=buff+dirname_length(buff); - 
my_error(ER_NO_SUCH_TABLE, MYF(0), db, form->real_name); + my_error(ER_NO_SUCH_TABLE, MYF(0), db, real_name); } else my_error(ER_FILE_NOT_FOUND, errortype, @@ -1048,7 +1032,7 @@ static void frm_error(int error, TABLE *form, const char *name, err_no= (my_errno == ENOENT) ? ER_FILE_NOT_FOUND : (my_errno == EAGAIN) ? ER_FILE_USED : ER_CANT_OPEN_FILE; my_error(err_no,errortype, - fn_format(buff,form->real_name,form_dev,datext,2),my_errno); + fn_format(buff,real_name,form_dev,datext,2),my_errno); break; } case 5: @@ -1062,7 +1046,7 @@ static void frm_error(int error, TABLE *form, const char *name, } my_printf_error(ER_UNKNOWN_COLLATION, "Unknown collation '%s' in table '%-.64s' definition", - MYF(0), csname, form->real_name); + MYF(0), csname, real_name); break; } default: /* Better wrong error than none */ @@ -1139,22 +1123,27 @@ TYPELIB *typelib(List &strings) } - /* - ** Search after a field with given start & length - ** If an exact field isn't found, return longest field with starts - ** at right position. - ** Return 0 on error, else field number+1 - ** This is needed because in some .frm fields 'fieldnr' was saved wrong - */ +/* + Search after a field with given start & length + If an exact field isn't found, return longest field with starts + at right position. 
+ + NOTES + This is needed because in some .frm fields 'fieldnr' was saved wrong + + RETURN + 0 error + # field number +1 +*/ static uint find_field(TABLE *form,uint start,uint length) { Field **field; - uint i,pos; + uint i, pos, fields; pos=0; - - for (field=form->field, i=1 ; i<= form->fields ; i++,field++) + fields= form->s->fields; + for (field=form->field, i=1 ; i<= fields ; i++,field++) { if ((*field)->offset() == start) { @@ -1322,17 +1311,20 @@ File create_frm(register my_string name, uint reclength, uchar *fileinfo, void update_create_info_from_table(HA_CREATE_INFO *create_info, TABLE *table) { + TABLE_SHARE *share= table->s; DBUG_ENTER("update_create_info_from_table"); - create_info->max_rows=table->max_rows; - create_info->min_rows=table->min_rows; - create_info->table_options=table->db_create_options; - create_info->avg_row_length=table->avg_row_length; - create_info->row_type=table->row_type; - create_info->raid_type=table->raid_type; - create_info->raid_chunks=table->raid_chunks; - create_info->raid_chunksize=table->raid_chunksize; - create_info->default_table_charset=table->table_charset; + + create_info->max_rows= share->max_rows; + create_info->min_rows= share->min_rows; + create_info->table_options= share->db_create_options; + create_info->avg_row_length= share->avg_row_length; + create_info->row_type= share->row_type; + create_info->raid_type= share->raid_type; + create_info->raid_chunks= share->raid_chunks; + create_info->raid_chunksize= share->raid_chunksize; + create_info->default_table_charset= share->table_charset; create_info->table_charset= 0; + DBUG_VOID_RETURN; } @@ -1702,6 +1694,7 @@ bool st_table_list::setup_ancestor(THD *thd, Item **conds, if (field_translation) { + DBUG_PRINT("info", ("there are already translation table")); /* prevent look up in SELECTs tree */ thd->lex->current_select= &thd->lex->select_lex; thd->lex->select_lex.no_wrap_view_item= 1; @@ -1726,6 +1719,9 @@ bool st_table_list::setup_ancestor(THD *thd, Item **conds, 
} if (where && !where->fixed && where->fix_fields(thd, ancestor, &where)) goto err; + if (check_option && !check_option->fixed && + check_option->fix_fields(thd, ancestor, &check_option)) + goto err; restore_want_privilege(); /* WHERE/ON resolved => we can rename fields */ @@ -1764,11 +1760,11 @@ bool st_table_list::setup_ancestor(THD *thd, Item **conds, { /* save original name of view column */ char *name= item->name; - if (!item->fixed && item->fix_fields(thd, ancestor, &item)) + transl[i].item= item; + if (!item->fixed && item->fix_fields(thd, ancestor, &transl[i].item)) goto err; /* set new item get in fix fields and original column name */ - transl[i].name= name; - transl[i++].item= item; + transl[i++].name= name; } field_translation= transl; /* TODO: sort this list? Use hash for big number of fields */ @@ -1873,11 +1869,19 @@ bool st_table_list::setup_ancestor(THD *thd, Item **conds, /* full text function moving to current select */ if (view->select_lex.ftfunc_list->elements) { + Item_arena *arena= thd->current_arena, backup; + if (arena->is_conventional()) + arena= 0; // For easier test + else + thd->set_n_backup_item_arena(arena, &backup); + Item_func_match *ifm; List_iterator_fast li(*(view->select_lex.ftfunc_list)); while ((ifm= li++)) current_select_save->ftfunc_list->push_front(ifm); + if (arena) + thd->restore_backup_item_arena(arena, &backup); } ok: @@ -2012,7 +2016,7 @@ bool st_table_list::set_insert_values(MEM_ROOT *mem_root) { if (!table->insert_values && !(table->insert_values= (byte *)alloc_root(mem_root, - table->rec_buff_length))) + table->s->rec_buff_length))) return TRUE; } else diff --git a/sql/table.h b/sql/table.h index 391b4908f96..c4cb948914e 100644 --- a/sql/table.h +++ b/sql/table.h @@ -38,7 +38,7 @@ typedef struct st_order { bool counter_used; /* parameter was counter of columns */ Field *field; /* If tmp-table group */ char *buff; /* If tmp-table group */ - table_map used,depend_map; + table_map used, depend_map; } ORDER; typedef 
struct st_grant_info @@ -80,48 +80,118 @@ enum timestamp_auto_set_type TIMESTAMP_AUTO_SET_ON_UPDATE= 2, TIMESTAMP_AUTO_SET_ON_BOTH= 3 }; -/* Table cache entry struct */ - class Field_timestamp; class Field_blob; class Table_triggers_list; -struct st_table { - handler *file; - Field **field; /* Pointer to fields */ - Field_blob **blob_field; /* Pointer to blob fields */ +/* This structure is shared between different table objects */ + +typedef struct st_table_share +{ /* hash of field names (contains pointers to elements of field array) */ - HASH name_hash; - byte *record[2]; /* Pointer to records */ - byte *default_values; /* Default values for INSERT */ - byte *insert_values; /* used by INSERT ... UPDATE */ - uint fields; /* field count */ - uint reclength; /* Recordlength */ - uint rec_buff_length; - uint keys,key_parts,primary_key,max_key_length,max_unique_length; - uint total_key_length; - uint uniques; - uint null_fields; /* number of null fields */ - uint blob_fields; /* number of blob fields */ - key_map keys_in_use, keys_for_keyread, read_only_keys; - key_map quick_keys, used_keys, keys_in_use_for_query; - KEY *key_info; /* data of keys in database */ + HASH name_hash; /* hash of field names */ + MEM_ROOT mem_root; TYPELIB keynames; /* Pointers to keynames */ - ha_rows max_rows; /* create information */ - ha_rows min_rows; /* create information */ - ulong avg_row_length; /* create information */ - ulong raid_chunksize; TYPELIB fieldnames; /* Pointer to fieldnames */ TYPELIB *intervals; /* pointer to interval info */ +#ifdef NOT_YET + pthread_mutex_t mutex; /* For locking the share */ + pthread_cond_t cond; /* To signal that share is ready */ + struct st_table *open_tables; /* link to open tables */ + struct st_table *used_next, /* Link to used tables */ + **used_prev; + /* The following is copied to each TABLE on OPEN */ + Field **field; + KEY *key_info; /* data of keys in database */ +#endif + uint *blob_field; /* Index to blobs in Field arrray*/ + byte 
*default_values; /* row with default values */ + char *comment; /* Comment about table */ + CHARSET_INFO *table_charset; /* Default charset of string fields */ + + /* A pair "database_name\0table_name\0", widely used as simply a db name */ + char *table_cache_key; + const char *db; /* Pointer to db */ + const char *table_name; /* Table name (for open) */ + const char *path; /* Path to .frm file (from datadir) */ + key_map keys_in_use; /* Keys in use for table */ + key_map keys_for_keyread; + ulong avg_row_length; /* create information */ + ulong raid_chunksize; + ulong version, flush_version; + ulong timestamp_offset; /* Set to offset+1 of record */ + ulong reclength; /* Recordlength */ + + ha_rows min_rows, max_rows; /* create information */ enum db_type db_type; /* table_type for handler */ enum row_type row_type; /* How rows are stored */ + enum tmp_table_type tmp_table; + + uint blob_ptr_size; /* 4 or 8 */ + uint null_bytes; + uint key_length; /* Length of table_cache_key */ + uint fields; /* Number of fields */ + uint rec_buff_length; /* Size of table->record[] buffer */ + uint keys, key_parts; + uint max_key_length, max_unique_length, total_key_length; + uint uniques; /* Number of UNIQUE index */ + uint null_fields; /* number of null fields */ + uint blob_fields; /* number of blob fields */ uint db_create_options; /* Create options from database */ uint db_options_in_use; /* Options in use */ uint db_record_offset; /* if HA_REC_IN_SEQ */ - uint db_stat; /* mode of file as in handler.h */ - uint raid_type,raid_chunks; - uint status; /* Used by postfix.. 
*/ - uint system; /* Set if system record */ + uint raid_type, raid_chunks; + uint open_count; /* Number of tables in open list */ + /* Index of auto-updated TIMESTAMP field in field array */ + uint primary_key; + uint timestamp_field_offset; + uint next_number_index; + uint next_number_key_offset; + uchar frm_version; + my_bool system; /* Set if system record */ + my_bool crypted; /* If .frm file is crypted */ + my_bool db_low_byte_first; /* Portable row format */ + my_bool crashed; + my_bool is_view; + my_bool name_lock, replace_with_name_lock; +} TABLE_SHARE; + + +/* Information for one open table */ + +struct st_table { + TABLE_SHARE *s; + handler *file; +#ifdef NOT_YET + struct st_table *used_next, **used_prev; /* Link to used tables */ + struct st_table *open_next, **open_prev; /* Link to open tables */ +#endif + struct st_table *next, *prev; + + THD *in_use; /* Which thread uses this */ + Field **field; /* Pointer to fields */ + + byte *record[2]; /* Pointer to records */ + byte *insert_values; /* used by INSERT ... UPDATE */ + key_map quick_keys, used_keys, keys_in_use_for_query; + KEY *key_info; /* data of keys in database */ + + Field *next_number_field, /* Set if next_number is activated */ + *found_next_number_field, /* Set on open */ + *rowid_field; + Field_timestamp *timestamp_field; + + /* Table's triggers, 0 if there are no of them */ + Table_triggers_list *triggers; + struct st_table_list *pos_in_table_list;/* Element referring to this table */ + ORDER *group; + const char *alias; /* alias or table name */ + uchar *null_flags; + ulong query_id; + + ha_rows quick_rows[MAX_KEY]; + key_part_map const_key_parts[MAX_KEY]; + uint quick_key_parts[MAX_KEY]; /* If this table has TIMESTAMP field with auto-set property (pointed by @@ -136,14 +206,15 @@ struct st_table { as example). 
*/ timestamp_auto_set_type timestamp_field_type; - /* Index of auto-updated TIMESTAMP field in field array */ - uint timestamp_field_offset; + table_map map; /* ID bit of table (1,2,4,8,16...) */ - uint next_number_index; - uint blob_ptr_size; /* 4 or 8 */ - uint next_number_key_offset; - int current_lock; /* Type of lock on table */ - enum tmp_table_type tmp_table; + uint tablenr,used_fields; + uint temp_pool_slot; /* Used by intern temp tables */ + uint status; /* What's in record[0] */ + uint db_stat; /* mode of file as in handler.h */ + /* number of select if it is derived table */ + uint derived_select_number; + int current_lock; /* Type of lock on table */ my_bool copy_blobs; /* copy_blobs when storing */ /* Used in outer joins: if true, all columns are considered to have NULL @@ -155,60 +226,22 @@ struct st_table { my_bool maybe_null; /* true if (outer_join != 0) */ my_bool force_index; my_bool distinct,const_table,no_rows; - my_bool key_read; - my_bool crypted; - my_bool db_low_byte_first; /* Portable row format */ + my_bool key_read, no_keyread; my_bool locked_by_flush; my_bool locked_by_name; my_bool fulltext_searched; - my_bool crashed; - my_bool is_view; - my_bool no_keyread, no_cache; - my_bool clear_query_id; /* To reset query_id for tables and cols */ + my_bool no_cache; + /* To signal that we should reset query_id for tables and cols */ + my_bool clear_query_id; my_bool auto_increment_field_not_null; my_bool insert_or_update; /* Can be used by the handler */ my_bool alias_name_used; /* true if table_name is alias */ - Field *next_number_field, /* Set if next_number is activated */ - *found_next_number_field, /* Set on open */ - *rowid_field; - Field_timestamp *timestamp_field; -#if MYSQL_VERSION_ID < 40100 - /* - Indicates whenever we have to set field_length members of all TIMESTAMP - fields to 19 (to honour 'new_mode' variable) or to original - field_length values. 
- */ - my_bool timestamp_mode; -#endif - my_string comment; /* Comment about table */ - CHARSET_INFO *table_charset; /* Default charset of string fields */ + REGINFO reginfo; /* field connections */ MEM_ROOT mem_root; GRANT_INFO grant; - /* Table's triggers, 0 if there are no of them */ - Table_triggers_list *triggers; - - /* A pair "database_name\0table_name\0", widely used as simply a db name */ - char *table_cache_key; - char *table_name,*real_name,*path; - uint key_length; /* Length of key */ - uint tablenr,used_fields,null_bytes; - table_map map; /* ID bit of table (1,2,4,8,16...) */ - ulong version,flush_version; - uchar *null_flags; FILESORT_INFO sort; - ORDER *group; - ha_rows quick_rows[MAX_KEY]; - uint quick_key_parts[MAX_KEY]; - key_part_map const_key_parts[MAX_KEY]; - ulong query_id; - uchar frm_version; - uint temp_pool_slot; /* Used by intern temp tables */ - struct st_table_list *pos_in_table_list;/* Element referring to this table */ - /* number of select if it is derived table */ - uint derived_select_number; - THD *in_use; /* Which thread uses this */ - struct st_table *next,*prev; + TABLE_SHARE share_not_to_be_used; /* To be deleted when true shares */ }; @@ -243,6 +276,7 @@ typedef struct st_field_info const char* old_name; } ST_FIELD_INFO; + struct st_table_list; typedef class Item COND; @@ -283,19 +317,21 @@ typedef struct st_schema_table struct st_lex; class select_union; + struct Field_translator { Item *item; const char *name; }; + typedef struct st_table_list { /* link in a local table list (used by SQL_LIST) */ struct st_table_list *next_local; /* link in a global list of all queries tables */ struct st_table_list *next_global, **prev_global; - char *db, *alias, *real_name, *schema_table_name; + char *db, *alias, *table_name, *schema_table_name; char *option; /* Used by cache index */ Item *on_expr; /* Used with outer join */ COND_EQUAL *cond_equal; /* Used with outer join */ @@ -353,7 +389,7 @@ typedef struct st_table_list thr_lock_type 
lock_type; uint outer_join; /* Which join type */ uint shared; /* Used in multi-upd */ - uint32 db_length, real_name_length; + uint32 db_length, table_name_length; bool updatable; /* VIEW/TABLE can be updated now */ bool straight; /* optimize with prev table */ bool updating; /* for replicate-do/ignore table */ @@ -428,9 +464,11 @@ public: bool end_of_fields() { return ptr == array_end; } const char *name(); Item *item(THD *thd) { return ptr->item; } + Item **item_ptr() {return &ptr->item; } Field *field() { return 0; } }; + typedef struct st_nested_join { List join_list; /* list of elements in the nested join */ @@ -440,6 +478,7 @@ typedef struct st_nested_join uint counter; /* to count tables in the nested join */ } NESTED_JOIN; + typedef struct st_changed_table_list { struct st_changed_table_list *next; @@ -447,6 +486,7 @@ typedef struct st_changed_table_list uint32 key_length; } CHANGED_TABLE_LIST; + typedef struct st_open_table_list{ struct st_open_table_list *next; char *db,*table; diff --git a/sql/tztime.cc b/sql/tztime.cc index dc38580f3b6..b9b9e4821c4 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1416,10 +1416,10 @@ static void tz_init_table_list(TABLE_LIST *tz_tabs, TABLE_LIST ***global_next_ptr) { bzero(tz_tabs, sizeof(TABLE_LIST) * 4); - tz_tabs[0].alias= tz_tabs[0].real_name= (char*)"time_zone_name"; - tz_tabs[1].alias= tz_tabs[1].real_name= (char*)"time_zone"; - tz_tabs[2].alias= tz_tabs[2].real_name= (char*)"time_zone_transition_type"; - tz_tabs[3].alias= tz_tabs[3].real_name= (char*)"time_zone_transition"; + tz_tabs[0].alias= tz_tabs[0].table_name= (char*)"time_zone_name"; + tz_tabs[1].alias= tz_tabs[1].table_name= (char*)"time_zone"; + tz_tabs[2].alias= tz_tabs[2].table_name= (char*)"time_zone_transition_type"; + tz_tabs[3].alias= tz_tabs[3].table_name= (char*)"time_zone_transition"; tz_tabs[0].next_global= tz_tabs[0].next_local= tz_tabs+1; tz_tabs[1].next_global= tz_tabs[1].next_local= tz_tabs+2; tz_tabs[2].next_global= tz_tabs[2].next_local= 
tz_tabs+3; @@ -1582,7 +1582,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) thd->db= my_strdup("mysql",MYF(0)); thd->db_length= 5; // Safety bzero((char*) &tables_buff, sizeof(TABLE_LIST)); - tables_buff[0].alias= tables_buff[0].real_name= + tables_buff[0].alias= tables_buff[0].table_name= (char*)"time_zone_leap_second"; tables_buff[0].lock_type= TL_READ; tables_buff[0].db= thd->db; diff --git a/sql/unireg.cc b/sql/unireg.cc index ee036ed113d..dd94098fbf3 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -656,6 +656,7 @@ static bool make_empty_rec(File file,enum db_type table_type, /* We need a table to generate columns for default values */ bzero((char*) &table,sizeof(table)); + table.s= &table.share_not_to_be_used; handler= get_new_handler((TABLE*) 0, table_type); if (!handler || @@ -666,8 +667,8 @@ static bool make_empty_rec(File file,enum db_type table_type, } table.in_use= current_thd; - table.db_low_byte_first= handler->low_byte_first(); - table.blob_ptr_size=portable_sizeof_char_ptr; + table.s->db_low_byte_first= handler->low_byte_first(); + table.s->blob_ptr_size= portable_sizeof_char_ptr; firstpos=reclength; null_count=0; diff --git a/sql/unireg.h b/sql/unireg.h index 053ca393ad0..eca540b61b9 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -121,12 +121,12 @@ #define SPECIAL_LOG_QUERIES_NOT_USING_INDEXES 4096 /* Log q not using indexes */ /* Extern defines */ -#define store_record(A,B) bmove_align((A)->B,(A)->record[0],(size_t) (A)->reclength) -#define restore_record(A,B) bmove_align((A)->record[0],(A)->B,(size_t) (A)->reclength) -#define cmp_record(A,B) memcmp((A)->record[0],(A)->B,(size_t) (A)->reclength) +#define store_record(A,B) bmove_align((A)->B,(A)->record[0],(size_t) (A)->s->reclength) +#define restore_record(A,B) bmove_align((A)->record[0],(A)->B,(size_t) (A)->s->reclength) +#define cmp_record(A,B) memcmp((A)->record[0],(A)->B,(size_t) (A)->s->reclength) #define empty_record(A) { \ - restore_record((A),default_values); 
\ - bfill((A)->null_flags,(A)->null_bytes,255);\ + restore_record((A),s->default_values); \ + bfill((A)->null_flags,(A)->s->null_bytes,255);\ } /* Defines for use with openfrm, openprt and openfrd */ diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index e7f8a035a15..4a7f70b0b87 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -152,15 +152,22 @@ languages and applications need to dynamically load and use MySQL. %package Max Release: %{release} -Summary: MySQL - server with Berkeley DB, RAID and UDF support +Summary: MySQL - server with extended functionality Group: Applications/Databases Provides: mysql-Max Obsoletes: mysql-Max Requires: MySQL-server >= 4.0 %description Max -Optional MySQL server binary that supports additional features like -Berkeley DB, RAID and User Defined Functions (UDFs). +Optional MySQL server binary that supports additional features like: + + - Berkeley DB Storage Engine + - Archive Storage Engine + - CSV Storage Engine + - Example Storage Engine + - MyISAM RAID + - User Defined Functions (UDFs). + To activate this binary, just install this package in addition to the standard MySQL package. 
@@ -273,6 +280,9 @@ BuildMySQL "--enable-shared \ --with-berkeley-db \ --with-innodb \ --with-raid \ + --with-archive \ + --with-csv-storage-engine \ + --with-example-storage-engine \ --with-embedded-server \ --with-server-suffix='-Max'" @@ -468,8 +478,6 @@ fi %doc %attr(644, root, root) %{_infodir}/mysql.info* -%doc %attr(644, root, man) %{_mandir}/man1/isamchk.1* -%doc %attr(644, root, man) %{_mandir}/man1/isamlog.1* %doc %attr(644, root, man) %{_mandir}/man1/mysql_zap.1* %doc %attr(644, root, man) %{_mandir}/man1/mysqld.1* %doc %attr(644, root, man) %{_mandir}/man1/mysql_fix_privilege_tables.1* @@ -480,8 +488,6 @@ fi %ghost %config(noreplace,missingok) %{_sysconfdir}/my.cnf -%attr(755, root, root) %{_bindir}/isamchk -%attr(755, root, root) %{_bindir}/isamlog %attr(755, root, root) %{_bindir}/my_print_defaults %attr(755, root, root) %{_bindir}/myisamchk %attr(755, root, root) %{_bindir}/myisam_ftdump @@ -502,7 +508,6 @@ fi %attr(755, root, root) %{_bindir}/mysqld_safe %attr(755, root, root) %{_bindir}/mysqlhotcopy %attr(755, root, root) %{_bindir}/mysqltest -%attr(755, root, root) %{_bindir}/pack_isam %attr(755, root, root) %{_bindir}/perror %attr(755, root, root) %{_bindir}/replace %attr(755, root, root) %{_bindir}/resolve_stack_dump @@ -556,7 +561,6 @@ fi %{_includedir}/mysql/* %{_libdir}/mysql/libdbug.a %{_libdir}/mysql/libheap.a -%{_libdir}/mysql/libmerge.a %if %{have_libgcc} %{_libdir}/mysql/libmygcc.a %endif @@ -568,7 +572,6 @@ fi %{_libdir}/mysql/libmysqlclient_r.la %{_libdir}/mysql/libmystrings.a %{_libdir}/mysql/libmysys.a -%{_libdir}/mysql/libnisam.a %{_libdir}/mysql/libvio.a %files shared @@ -597,6 +600,17 @@ fi # itself - note that they must be ordered by date (important when # merging BK trees) %changelog +* Tue Jan 04 2005 Petr Chardin + +- ISAM and merge storage engines were purged. 
As well as appropriate + tools and manpages (isamchk and isamlog) + +* Thu Dec 31 2004 Lenz Grimmer + +- enabled the "Archive" storage engine for the max binary +- enabled the "CSV" storage engine for the max binary +- enabled the "Example" storage engine for the max binary + * Thu Aug 26 2004 Lenz Grimmer - MySQL-Max now requires MySQL-server instead of MySQL (BUG 3860)