mirror of https://github.com/MariaDB/server.git
Merge bk-internal.mysql.com:/home/bk/mysql-4.1
into mysql.com:/home/my/mysql-4.1
BUILD/compile-dist (new executable file, 47 lines)

@@ -0,0 +1,47 @@
#!/bin/sh
#
# This script's purpose is to update the automake/autoconf helper scripts and
# to run a plain "configure" without any special compile flags. Only features
# that affect the content of the source distribution are enabled. The resulting
# tree can then be picked up by "make dist" to create the "pristine source
# package" that is used as the basis for all other binary builds.
#
make distclean
aclocal
autoheader
libtoolize --automake --force --copy
automake --force --add-missing --copy
autoconf
(cd bdb/dist && sh s_all)
(cd innobase && aclocal && autoheader && aclocal && automake && autoconf)

# Default to gcc for CC and CXX
if test -z "$CXX" ; then
  export CXX=gcc
fi

if test -z "$CC" ; then
  export CC=gcc
fi

# Use ccache, if available
if ccache -V > /dev/null 2>&1
then
  if ! (echo "$CC" | grep "ccache" > /dev/null)
  then
    export CC="ccache $CC"
  fi
  if ! (echo "$CXX" | grep "ccache" > /dev/null)
  then
    export CXX="ccache $CXX"
  fi
fi

# Make sure to enable all features that affect "make dist"
./configure \
  --with-embedded-server \
  --with-berkeley-db \
  --with-innodb \
  --enable-thread-safe-client \
  --with-ndbcluster
make

@@ -103,6 +103,7 @@ lenz@mysql.com
magnus@neptunus.(none)
magnus@shellback.(none)
marko@hundin.mysql.fi
marty@linux.site
mats@mysql.com
matt@booty.(none)
matt@mysql.com

@@ -26,7 +26,7 @@ else
}

# Some predefined settings
$build_command= "BUILD/compile-pentium-max";
$build_command= "BUILD/compile-dist";
$PWD= cwd();
$opt_docdir= $PWD . "/mysqldoc";
$opt_archive_log= undef;

@@ -70,7 +70,7 @@ GetOptions(
  "test|t",
  "verbose|v",
  "win-dist|w",
  "quiet|q",
  "quiet|q",
) || print_help("");

#

@@ -122,18 +122,8 @@ if (($opt_directory ne $PWD) && (!-d $opt_directory && !$opt_dry_run))
#
if ($opt_pull)
{
  &logger("Updating BK tree $REPO to latest ChangeSet first");
  chdir ($REPO) or &abort("Could not chdir to $REPO!");
  &run_command("bk pull", "Could not update $REPO!");
  chdir ($PWD) or &abort("Could not chdir to $PWD!");

  unless ($opt_skip_manual)
  {
    &logger("Updating manual tree in $opt_docdir");
    chdir ($opt_docdir) or &abort("Could not chdir to $opt_docdir!");
    &run_command("bk pull", "Could not update $opt_docdir!");
    chdir ($PWD) or &abort("Could not chdir to $PWD!");
  }
  &bk_pull("$REPO");
  &bk_pull("$opt_docdir") unless ($opt_skip_manual);
}

#

@@ -270,7 +260,7 @@ if (defined $opt_changelog)
  $command.= " " . $REPO . " > $target_dir/ChangeLog";
  &logger($command);
  # We cannot use run_command here because of output redirection
  if (!$opt_dry_run)
  unless ($opt_dry_run)
  {
    system($command) == 0 or &abort("Could not create $target_dir/ChangeLog!");
  }

@@ -281,17 +271,17 @@ if (defined $opt_changelog)
#
unless ($opt_skip_manual)
{
  $msg= "Updating manual files";
  &logger($msg);
  &logger("Updating manual files");
  foreach $file qw/internals manual reservedwords/
  {
    system ("bk cat $opt_docdir/Docs/$file.texi > $target_dir/Docs/$file.texi") == 0
      or &abort("Could not update $file.texi in $target_dir/Docs/!");
  }
  system ("rm -f $target_dir/Docs/Images/Makefile*") == 0
    or &abort("Could not remove Makefiles in $target_dir/Docs/Images/!");
  system ("cp $opt_docdir/Docs/Images/*.* $target_dir/Docs/Images") == 0
    or &abort("Could not copy image files in $target_dir/Docs/Images/!");

  &run_command("rm -f $target_dir/Docs/Images/Makefile*",
               "Could not remove Makefiles in $target_dir/Docs/Images/!");
  &run_command("cp $opt_docdir/Docs/Images/*.* $target_dir/Docs/Images",
               "Could not copy image files in $target_dir/Docs/Images/!");
}

#

@@ -377,6 +367,18 @@ if ($opt_archive_log)

exit 0;

#
# Run a BK pull on the given BK tree
#
sub bk_pull
{
  my $bk_tree= $_[0];
  &logger("Updating BK tree $bk_tree to latest ChangeSet first");
  chdir ($bk_tree) or &abort("Could not chdir to $bk_tree!");
  &run_command("bk pull", "Could not update $bk_tree!");
  chdir ($PWD) or &abort("Could not chdir to $PWD!");
}

#
# Print the help text message (with an optional message on top)
#

@@ -2990,8 +2990,7 @@ recv_reset_log_files_for_backup(
  memcpy(name + log_dir_len, logfilename, sizeof logfilename);

  buf = ut_malloc(LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE);
  memset(buf, LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE, '\0');

  memset(buf, '\0', LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE);

  for (i = 0; i < n_log_files; i++) {

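The removed memset call above has its last two arguments swapped: memset takes the fill byte as its second argument and the byte count as its third, so passing '\0' as the count writes zero bytes and leaves the freshly allocated log-header buffer uninitialized. A minimal standalone C++ sketch of the difference (the buffer size here is a stand-in, not the InnoDB constants):

    #include <cstdlib>
    #include <cstring>

    int main()
    {
        const std::size_t header_size = 2048;  // stand-in for LOG_FILE_HDR_SIZE + OS_FILE_LOG_BLOCK_SIZE
        unsigned char* buf = static_cast<unsigned char*>(std::malloc(header_size));

        std::memset(buf, header_size, '\0');   // buggy order: the count is '\0', i.e. 0, so nothing is written
        std::memset(buf, '\0', header_size);   // fixed order: all header_size bytes are zeroed

        std::free(buf);
        return 0;
    }
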
@@ -381,8 +381,14 @@ row_upd_changes_field_size_or_external(
  new_len = new_val->len;

  if (new_len == UNIV_SQL_NULL) {
    /* A bug fixed on Dec 31st, 2004: we looked at the
    SQL NULL size from the wrong field! We may backport
    this fix also to 4.0. The merge to 5.0 will be made
    manually immediately after we commit this to 4.1. */

    new_len = dtype_get_sql_null_size(
        dict_index_get_nth_type(index, i));
        dict_index_get_nth_type(index,
                                upd_field->field_no));
  }

  old_len = rec_get_nth_field_size(rec, upd_field->field_no);

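The comment in this hunk describes the bug being fixed: the surrounding loop walks the update vector, so i is the position of a changed field inside that vector, not a column number of the index, and the SQL NULL size has to be taken from the column the update field actually names (upd_field->field_no). A minimal sketch of that distinction, with hypothetical simplified types rather than the real InnoDB structures:

    // Hypothetical stand-ins: an update vector lists only the changed fields,
    // each tagged with the index column number it refers to.
    struct upd_field_t { unsigned field_no; };             // column position within the index
    struct dict_index_sketch { unsigned null_size[16]; };  // per-column SQL NULL storage size

    unsigned null_size_for(const dict_index_sketch& index,
                           const upd_field_t* upd_fields, unsigned i)
    {
        // Correct: the i-th *changed* field names its own column via field_no.
        return index.null_size[upd_fields[i].field_no];
        // The pre-fix code in effect used index.null_size[i], which is only
        // right when the i-th changed field happens to be the i-th index column.
    }
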
@@ -95,6 +95,7 @@ $Devel::Trace::TRACE= 1;

my @skip_if_embedded_server=
(
 "alter_table",
 "bdb-deadlock",
 "connect",
 "flush_block_commit",

@@ -148,6 +149,7 @@ our @mysqld_src_dirs=

our $glob_win32= 0;
our $glob_mysql_test_dir= undef;
our $glob_mysql_bench_dir= undef;
our $glob_hostname= undef;
our $glob_scriptname= undef;
our $glob_use_running_server= 0;

@@ -237,6 +239,8 @@ our $opt_skip_test;

our $opt_sleep;

our $opt_ps_protocol;

# FIXME all of the sleep time handling needs cleanup
our $opt_sleep_time_after_restart= 1;
our $opt_sleep_time_for_delete= 10;

@@ -301,7 +305,7 @@ sub mysqld_arguments ($$$$$);
sub stop_masters_slaves ();
sub stop_masters ();
sub stop_slaves ();
sub run_mysqltest ($);
sub run_mysqltest ($$);

######################################################################
#

@@ -396,6 +400,7 @@ sub initial_setup () {
  # 'basedir' is always parent of "mysql-test" directory
  $glob_mysql_test_dir= cwd();
  $glob_basedir= dirname($glob_mysql_test_dir);
  $glob_mysql_bench_dir= "$glob_basedir/mysql-bench"; # FIXME make configurable

  $path_timefile= "$glob_mysql_test_dir/var/log/mysqltest-time";

@@ -441,6 +446,7 @@ sub command_line_setup () {
  'debug' => \$opt_debug,
  'do-test=s' => \$opt_do_test,
  'embedded-server' => \$opt_embedded_server,
  'ps-protocol' => \$opt_ps_protocol,
  'extern' => \$opt_extern,
  'fast' => \$opt_fast,
  'force' => \$opt_force,

@@ -458,6 +464,7 @@ sub command_line_setup () {
  'netware' => \$opt_netware,
  'no-manager' => \$opt_no_manager,
  'old-master' => \$opt_old_master,
  'ps-protocol' => \$opt_ps_protocol,
  'record' => \$opt_record,
  'script-debug' => \$opt_script_debug,
  'skip-rpl' => \$opt_skip_rpl,

@@ -526,7 +533,7 @@ sub command_line_setup () {

  if ( $opt_extern and $opt_local )
  {
    die "Can't use --extern and --local at the same time";
    mtr_error("Can't use --extern and --local at the same time");
  }

  if ( ! $opt_socket )

@@ -568,7 +575,7 @@ sub command_line_setup () {

    if ( $opt_extern )
    {
      die "Can't use --extern with --embedded-server";
      mtr_error("Can't use --extern with --embedded-server");
    }
    $opt_result_ext= ".es";
  }

@@ -589,12 +596,14 @@ sub command_line_setup () {
    $opt_sleep_time_after_restart= $opt_sleep;
  }

  if ( $opt_gcov )
  if ( $opt_gcov and ! $opt_source_dist )
  {
    if ( $opt_source_dist )
    {
      die "Coverage test needs the source - please use source dist";
    }
    mtr_error("Coverage test needs the source - please use source dist");
  }

  if ( $glob_use_embedded_server and ! $opt_source_dist )
  {
    mtr_error("Embedded server needs source tree - please use source dist");
  }

  if ( $opt_gdb )

@@ -602,7 +611,7 @@ sub command_line_setup () {
    $opt_wait_timeout= 300;
    if ( $opt_extern )
    {
      die "Can't use --extern with --gdb";
      mtr_error("Can't use --extern with --gdb");
    }
  }

@@ -611,7 +620,7 @@ sub command_line_setup () {
    $opt_gdb= 1;
    if ( $opt_extern )
    {
      die "Can't use --extern with --manual-gdb";
      mtr_error("Can't use --extern with --manual-gdb");
    }
  }

@@ -619,7 +628,7 @@ sub command_line_setup () {
  {
    if ( $opt_extern )
    {
      die "Can't use --extern with --ddd";
      mtr_error("Can't use --extern with --ddd");
    }
  }

@@ -689,10 +698,10 @@ sub executable_setup () {
    {
      mtr_error("Cannot find embedded server 'mysqltest'");
    }
    $path_tests_bindir= "$glob_basedir/libmysqld/examples";
  }
  else
  {
    $exe_mysqld= "$glob_basedir/sql/mysqld";
    if ( -f "$glob_basedir/client/.libs/lt-mysqltest" )
    {
      $exe_mysqltest= "$glob_basedir/client/.libs/lt-mysqltest";

@@ -705,6 +714,7 @@ sub executable_setup () {
    {
      $exe_mysqltest= "$glob_basedir/client/mysqltest";
    }
    $path_tests_bindir= "$glob_basedir/tests";
  }
  if ( -f "$glob_basedir/client/.libs/mysqldump" )
  {

@@ -723,8 +733,8 @@ sub executable_setup () {
    $exe_mysqlbinlog= "$glob_basedir/client/mysqlbinlog";
  }

  $exe_mysqld= "$glob_basedir/sql/mysqld";
  $path_client_bindir= "$glob_basedir/client";
  $path_tests_bindir= "$glob_basedir/tests";
  $exe_mysqladmin= "$path_client_bindir/mysqladmin";
  $exe_mysql= "$path_client_bindir/mysql";
  $path_language= "$glob_basedir/sql/share/english/";

@@ -791,7 +801,7 @@ sub handle_int_signal () {
  $SIG{INT}= 'DEFAULT'; # If we get a ^C again, we die...
  mtr_warning("got INT signal, cleaning up.....");
  stop_masters_slaves();
  exit(1);
  mtr_error("We die from ^C signal from user");
}


@@ -806,7 +816,7 @@ sub collect_test_cases () {

  my @tests; # Array of hash, will be array of C struct

  opendir(TESTDIR, $testdir) or die "Can't open dir \"$testdir\": $!";
  opendir(TESTDIR, $testdir) or mtr_error("Can't open dir \"$testdir\": $!");

  foreach my $elem ( sort readdir(TESTDIR) ) {
    my $tname= mtr_match_extension($elem,"test");

@@ -1066,7 +1076,7 @@ sub sleep_until_file_created ($$) {

  if ( ! -r $pidfile )
  {
    die "No $pidfile was created";
    mtr_error("No $pidfile was created");
  }
}

@@ -1084,7 +1094,7 @@ sub ndbcluster_start () {
  mtr_report("Starting ndbcluster");
  my $ndbcluster_opts= $opt_bench ? "" : "--small";
  # FIXME check result code?!
  mtr_run("./ndb/ndbcluster",
  mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
          ["--port-base=$opt_ndbcluster_port",
           $ndbcluster_opts,
           "--diskless",

@@ -1094,7 +1104,7 @@ sub ndbcluster_start () {
}

sub ndbcluster_stop () {
  mtr_run("./ndb/ndbcluster",
  mtr_run("$glob_mysql_test_dir/ndb/ndbcluster",
          ["--data-dir=$glob_mysql_test_dir/var",
           "--port-base=$opt_ndbcluster_port",
           "--stop"],

@@ -1142,17 +1152,17 @@ sub run_benchmarks ($) {
  if ( ! $benchmark )
  {
    mtr_add_arg($args, "--log");
    mtr_run("./run-all-tests", $args, "", "", "", "");
    mtr_run("$glob_mysql_bench_dir/run-all-tests", $args, "", "", "", "");
    # FIXME check result code?!
  }
  elsif ( -x $benchmark )
  {
    mtr_run("./$benchmark", $args, "", "", "", "");
    mtr_run("$glob_mysql_bench_dir/$benchmark", $args, "", "", "", "");
    # FIXME check result code?!
  }
  else
  {
    mtr_error("benchmark $benchmark not found");
    mtr_error("Benchmark $benchmark not found");
  }

  chdir($glob_mysql_test_dir); # Go back

@@ -1172,6 +1182,8 @@ sub run_benchmarks ($) {

sub run_tests () {

  mtr_report("Finding Tests");

  my $tests= collect_test_cases();

  mtr_report("Starting Tests");

@@ -1255,7 +1267,7 @@ sub install_db ($$) {
  if ( mtr_run($exe_mysqld, $args, $init_db_sql,
               $path_manager_log, $path_manager_log, "") != 0 )
  {
    mtr_error("error executing mysqld --bootstrap\n" .
    mtr_error("Error executing mysqld --bootstrap\n" .
              "Could not install $type test DBs");
  }
}

@@ -1293,6 +1305,7 @@ sub run_testcase ($) {

  if ( $tinfo->{'skip'} )
  {
    mtr_report_test_name($tinfo);
    mtr_report_test_skipped($tinfo);
    return;
  }

@@ -1323,14 +1336,24 @@ sub run_testcase ($) {
  # ----------------------------------------------------------------------

    stop_slaves();
  }

  # ----------------------------------------------------------------------
  # Start masters
  # ----------------------------------------------------------------------
  # ----------------------------------------------------------------------
  # Prepare to start masters. Even if we use embedded, we want to run
  # the preparation.
  # ----------------------------------------------------------------------

  mtr_tofile($master->[0]->{'path_myerr'},"CURRENT_TEST: $tname\n");
  do_before_start_master($tname,$tinfo->{'master_sh'});
  mtr_tofile($master->[0]->{'path_myerr'},"CURRENT_TEST: $tname\n");
  do_before_start_master($tname,$tinfo->{'master_sh'});

  # ----------------------------------------------------------------------
  # Start masters
  # ----------------------------------------------------------------------

  mtr_report_test_name($tinfo);

  if ( ! $glob_use_running_server and ! $glob_use_embedded_server )
  {
    # FIXME give the args to the embedded server?!
    # FIXME what does $opt_local_master mean?!
    # FIXME split up start and check that started so that can do

@@ -1385,9 +1408,7 @@ sub run_testcase ($) {
  unlink("r/$tname.reject");
  unlink($path_timefile);

  mtr_report_test_name($tinfo);

  my $res= run_mysqltest($tinfo);
  my $res= run_mysqltest($tinfo, $tinfo->{'master_opt'});

  if ( $res == 0 )
  {

@@ -1470,7 +1491,7 @@ sub do_before_start_master ($$) {
  if ( $master_init_script and
       mtr_run($master_init_script, [], "", "", "", "") != 0 )
  {
    mtr_error("can't run $master_init_script");
    mtr_error("Can't run $master_init_script");
  }
  # for gcov FIXME needed? If so we need more absolute paths
  # chdir($glob_basedir);

@@ -1501,7 +1522,7 @@ sub do_before_start_slave ($$) {
  if ( $slave_init_script and
       mtr_run($slave_init_script, [], "", "", "", "") != 0 )
  {
    mtr_error("can't run $slave_init_script");
    mtr_error("Can't run $slave_init_script");
  }

  unlink("$glob_mysql_test_dir/var/slave-data/log.*");

@@ -1525,9 +1546,11 @@ sub mysqld_arguments ($$$$$) {
  if ( $glob_use_embedded_server )
  {
    $prefix= "--server-arg=";
  } else {
    # We can't pass embedded server --no-defaults
    mtr_add_arg($args, "%s--no-defaults", $prefix);
  }

  mtr_add_arg($args, "%s--no-defaults", $prefix);
  mtr_add_arg($args, "%s--basedir=%s", $prefix, $path_my_basedir);
  mtr_add_arg($args, "%s--character-sets-dir=%s", $prefix, $path_charsetsdir);
  mtr_add_arg($args, "%s--core", $prefix);

@@ -1815,7 +1838,7 @@ sub mysqld_start ($$$$) {
    }
  }

  die "Can't start mysqld FIXME";
  mtr_error("Can't start mysqld FIXME");
}

sub stop_masters_slaves () {

@@ -1870,8 +1893,9 @@ sub stop_slaves () {
}


sub run_mysqltest ($) {
  my $tinfo= shift;
sub run_mysqltest ($$) {
  my $tinfo= shift;
  my $master_opts= shift;

  # FIXME set where????
  my $cmdline_mysqldump= "$exe_mysqldump --no-defaults -uroot " .

@@ -1901,19 +1925,11 @@ sub run_mysqltest ($) {
  $ENV{'CLIENT_BINDIR'}= $path_client_bindir;
  $ENV{'TESTS_BINDIR'}= $path_tests_bindir;

  my $exe= $exe_mysqltest;
  my $args; # Arg vector
  my $exe= $exe_mysqltest;
  my $args;

  mtr_init_args(\$args);

  if ( $opt_strace_client )
  {
    $exe= "strace"; # FIXME there are ktrace, ....
    mtr_add_arg($args, "-o");
    mtr_add_arg($args, "%s/var/log/mysqltest.strace", $glob_mysql_test_dir);
    mtr_add_arg($args, "$exe_mysqltest");
  }

  mtr_add_arg($args, "--no-defaults");
  mtr_add_arg($args, "--socket=%s", $master->[0]->{'path_mysock'});
  mtr_add_arg($args, "--database=test");

@@ -1925,6 +1941,19 @@ sub run_mysqltest ($) {
  mtr_add_arg($args, "--tmpdir=%s", $opt_tmpdir);
  mtr_add_arg($args, "--port=%d", $master->[0]->{'path_myport'});

  if ( $opt_ps_protocol )
  {
    mtr_add_arg($args, "--ps-protocol");
  }

  if ( $opt_strace_client )
  {
    $exe= "strace"; # FIXME there are ktrace, ....
    mtr_add_arg($args, "-o");
    mtr_add_arg($args, "%s/var/log/mysqltest.strace", $glob_mysql_test_dir);
    mtr_add_arg($args, "$exe_mysqltest");
  }

  if ( $opt_timer )
  {
    mtr_add_arg($args, "--timer-file=var/log/timer");

@@ -1966,6 +1995,10 @@ sub run_mysqltest ($) {
  mtr_add_arg($args, "-R");
  mtr_add_arg($args, $tinfo->{'result_file'});

  # ----------------------------------------------------------------------
  # If embedded server, we create server args to give mysqltest to pass on
  # ----------------------------------------------------------------------

  if ( $glob_use_embedded_server )
  {
    mysqld_arguments($args,'master',0,$tinfo->{'master_opt'},[]);

@@ -212,6 +212,13 @@ Thd_ndb::~Thd_ndb()
{
  if (ndb)
    delete ndb;
  ndb= 0;
}

inline
Ndb *ha_ndbcluster::get_ndb()
{
  return ((Thd_ndb*)current_thd->transaction.thd_ndb)->ndb;
}

/*

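The get_ndb() helper added above is what the rest of this change switches the handler code to: the per-connection Ndb object is looked up from the current thread's state on every use, instead of being cached in the handler's m_ndb member (which the header hunks at the end of this diff delete from the class). A rough sketch of the pattern, using hypothetical stand-in types rather than the real server or NDB API:

    // Hypothetical types: the connection-scoped object lives in per-thread
    // state, and the handler resolves it each time instead of keeping a cached
    // pointer that could go stale when the handler is reused by another session.
    struct Ndb_sketch { };                                   // per-connection handle
    struct Thd_ndb_sketch { Ndb_sketch* ndb; };              // wrapper stored with the thread

    thread_local Thd_ndb_sketch* current_thd_ndb = nullptr;  // set when a connection attaches

    class handler_sketch
    {
    public:
        Ndb_sketch* get_ndb() const { return current_thd_ndb->ndb; }

        void some_operation()
        {
            Ndb_sketch* ndb = get_ndb();  // fetched fresh on each use; no m_ndb member
            (void)ndb;                    // ... dictionary / transaction calls would go here
        }
    };
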
@@ -245,8 +252,9 @@ void ha_ndbcluster::records_update()
                      info->no_uncommitted_rows_count));
  //  if (info->records == ~(ha_rows)0)
  {
    Ndb *ndb= get_ndb();
    Uint64 rows;
    if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){
    if(ndb_get_table_statistics(ndb, m_tabname, &rows, 0) == 0){
      info->records= rows;
    }
  }

@@ -331,7 +339,8 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans)
  switch (err.classification) {
  case NdbError::SchemaError:
  {
    NDBDICT *dict= m_ndb->getDictionary();
    Ndb *ndb= get_ndb();
    NDBDICT *dict= ndb->getDictionary();
    DBUG_PRINT("info", ("invalidateTable %s", m_tabname));
    dict->invalidateTable(m_tabname);
    table->version=0L;  /* Free when thread is ready */

@@ -361,7 +370,7 @@ bool ha_ndbcluster::get_error_message(int error,
  DBUG_ENTER("ha_ndbcluster::get_error_message");
  DBUG_PRINT("enter", ("error: %d", error));

  Ndb *ndb= ((Thd_ndb*)current_thd->transaction.thd_ndb)->ndb;
  Ndb *ndb= get_ndb();
  if (!ndb)
    DBUG_RETURN(FALSE);

@@ -690,7 +699,8 @@ bool ha_ndbcluster::uses_blob_value(bool all_fields)

int ha_ndbcluster::get_metadata(const char *path)
{
  NDBDICT *dict= m_ndb->getDictionary();
  Ndb *ndb= get_ndb();
  NDBDICT *dict= ndb->getDictionary();
  const NDBTAB *tab;
  int error;
  bool invalidating_ndb_table= FALSE;

@@ -765,7 +775,8 @@ int ha_ndbcluster::build_index_list(TABLE *tab, enum ILBP phase)
  static const char* unique_suffix= "$unique";
  KEY* key_info= tab->key_info;
  const char **key_name= tab->keynames.type_names;
  NdbDictionary::Dictionary *dict= m_ndb->getDictionary();
  Ndb *ndb= get_ndb();
  NdbDictionary::Dictionary *dict= ndb->getDictionary();
  DBUG_ENTER("build_index_list");

  // Save information about all known indexes

@@ -1735,7 +1746,8 @@ int ha_ndbcluster::write_row(byte *record)
  if (table->primary_key == MAX_KEY)
  {
    // Table has hidden primary key
    Uint64 auto_value= m_ndb->getAutoIncrementValue((const NDBTAB *) m_table);
    Ndb *ndb= get_ndb();
    Uint64 auto_value= ndb->getAutoIncrementValue((const NDBTAB *) m_table);
    if (set_hidden_key(op, table->fields, (const byte*)&auto_value))
      ERR_RETURN(op->getNdbError());
  }

@@ -1812,11 +1824,12 @@ int ha_ndbcluster::write_row(byte *record)
  }
  if ((has_auto_increment) && (m_skip_auto_increment))
  {
    Ndb *ndb= get_ndb();
    Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1;
    DBUG_PRINT("info",
               ("Trying to set next auto increment value to %lu",
                (ulong) next_val));
    if (m_ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE))
    if (ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE))
      DBUG_PRINT("info",
                 ("Setting next auto increment value to %u", next_val));
  }

@@ -2657,9 +2670,12 @@ void ha_ndbcluster::info(uint flag)
  }
  else
  {
    if ((my_errno= check_ndb_connection()))
      DBUG_VOID_RETURN;
    Ndb *ndb= get_ndb();
    Uint64 rows= 100;
    if (current_thd->variables.ndb_use_exact_count)
      ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0);
      ndb_get_table_statistics(ndb, m_tabname, &rows, 0);
    records= rows;
  }
}

@@ -2986,6 +3002,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
    DBUG_RETURN(1);

  Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb;
  Ndb *ndb= thd_ndb->ndb;

  DBUG_PRINT("enter", ("transaction.thd_ndb->lock_count: %d",
                       thd_ndb->lock_count));

@@ -3003,9 +3020,9 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
      DBUG_ASSERT(!thd->transaction.stmt.ndb_tid);
      DBUG_PRINT("trans",("Starting transaction stmt"));

      trans= m_ndb->startTransaction();
      trans= ndb->startTransaction();
      if (trans == NULL)
        ERR_RETURN(m_ndb->getNdbError());
        ERR_RETURN(ndb->getNdbError());
      no_uncommitted_rows_reset(thd);
      thd->transaction.stmt.ndb_tid= trans;
    }

@@ -3017,9 +3034,9 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
        // A "master" transaction ha not been started yet
        DBUG_PRINT("trans",("starting transaction, all"));

        trans= m_ndb->startTransaction();
        trans= ndb->startTransaction();
        if (trans == NULL)
          ERR_RETURN(m_ndb->getNdbError());
          ERR_RETURN(ndb->getNdbError());
        no_uncommitted_rows_reset(thd);

        /*

@@ -3069,7 +3086,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
    m_retrieve_primary_key= FALSE;
    m_ops_pending= 0;
    {
      NDBDICT *dict= m_ndb->getDictionary();
      NDBDICT *dict= ndb->getDictionary();
      const NDBTAB *tab;
      void *tab_info;
      if (!(tab= dict->getTable(m_tabname, &tab_info)))

@@ -3096,7 +3113,7 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
        We must in this case close the transaction to release resources
      */
      DBUG_PRINT("trans",("ending non-updating transaction"));
      m_ndb->closeTransaction(m_active_trans);
      ndb->closeTransaction(m_active_trans);
      thd->transaction.stmt.ndb_tid= 0;
    }
  }

@@ -3142,16 +3159,17 @@ int ha_ndbcluster::start_stmt(THD *thd)

  NdbConnection *trans= (NdbConnection*)thd->transaction.stmt.ndb_tid;
  if (!trans){
    Ndb *ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb;
    DBUG_PRINT("trans",("Starting transaction stmt"));

    NdbConnection *tablock_trans=
      (NdbConnection*)thd->transaction.all.ndb_tid;
    DBUG_PRINT("info", ("tablock_trans: %x", (uint)tablock_trans));
    DBUG_ASSERT(tablock_trans);
    //    trans= m_ndb->hupp(tablock_trans);
    trans= m_ndb->startTransaction();
    //    trans= ndb->hupp(tablock_trans);
    trans= ndb->startTransaction();
    if (trans == NULL)
      ERR_RETURN(m_ndb->getNdbError());
      ERR_RETURN(ndb->getNdbError());
    no_uncommitted_rows_reset(thd);
    thd->transaction.stmt.ndb_tid= trans;
  }

@@ -3538,7 +3556,8 @@ int ha_ndbcluster::create(const char *name,
    DBUG_RETURN(my_errno);

  // Create the table in NDB
  NDBDICT *dict= m_ndb->getDictionary();
  Ndb *ndb= get_ndb();
  NDBDICT *dict= ndb->getDictionary();
  if (dict->createTable(tab) != 0)
  {
    const NdbError err= dict->getNdbError();

@@ -3583,7 +3602,8 @@ int ha_ndbcluster::create_index(const char *name,
                                KEY *key_info,
                                bool unique)
{
  NdbDictionary::Dictionary *dict= m_ndb->getDictionary();
  Ndb *ndb= get_ndb();
  NdbDictionary::Dictionary *dict= ndb->getDictionary();
  KEY_PART_INFO *key_part= key_info->key_part;
  KEY_PART_INFO *end= key_part + key_info->key_parts;

@@ -3641,15 +3661,16 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)

  if (check_ndb_connection())
    DBUG_RETURN(my_errno= HA_ERR_NO_CONNECTION);

  dict= m_ndb->getDictionary();

  Ndb *ndb= get_ndb();
  dict= ndb->getDictionary();
  if (!(orig_tab= dict->getTable(m_tabname)))
    ERR_RETURN(dict->getNdbError());

  m_table= (void *)orig_tab;
  // Change current database to that of target table
  set_dbname(to);
  m_ndb->setDatabaseName(m_dbname);
  ndb->setDatabaseName(m_dbname);
  if (!(result= alter_table_name(new_tabname)))
  {
    // Rename .ndb file

@@ -3666,7 +3687,8 @@ int ha_ndbcluster::rename_table(const char *from, const char *to)

int ha_ndbcluster::alter_table_name(const char *to)
{
  NDBDICT * dict= m_ndb->getDictionary();
  Ndb *ndb= get_ndb();
  NDBDICT *dict= ndb->getDictionary();
  const NDBTAB *orig_tab= (const NDBTAB *) m_table;
  int ret;
  DBUG_ENTER("alter_table_name_table");

@@ -3708,8 +3730,9 @@ int ha_ndbcluster::delete_table(const char *name)

int ha_ndbcluster::drop_table()
{
  NdbDictionary::Dictionary *dict= m_ndb->getDictionary();

  Ndb *ndb= get_ndb();
  NdbDictionary::Dictionary *dict= ndb->getDictionary();

  DBUG_ENTER("drop_table");
  DBUG_PRINT("enter", ("Deleting %s", m_tabname));

@@ -3742,6 +3765,7 @@ longlong ha_ndbcluster::get_auto_increment()
{
  DBUG_ENTER("get_auto_increment");
  DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
  Ndb *ndb= get_ndb();
  int cache_size=
    (m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ?
      m_rows_to_insert - m_rows_inserted

@@ -3750,8 +3774,8 @@ longlong ha_ndbcluster::get_auto_increment()
      : m_autoincrement_prefetch;
  Uint64 auto_value=
    (m_skip_auto_increment) ?
      m_ndb->readAutoIncrementValue((const NDBTAB *) m_table)
      : m_ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size);
      ndb->readAutoIncrementValue((const NDBTAB *) m_table)
      : ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size);
  DBUG_RETURN((longlong)auto_value);
}

@@ -3764,7 +3788,6 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
  handler(table_arg),
  m_active_trans(NULL),
  m_active_cursor(NULL),
  m_ndb(NULL),
  m_table(NULL),
  m_table_info(NULL),
  m_table_flags(HA_REC_NOT_IN_SEQ |

@@ -3894,7 +3917,6 @@ int ha_ndbcluster::close(void)
  DBUG_ENTER("close");
  free_share(m_share); m_share= 0;
  release_metadata();
  m_ndb= NULL;
  DBUG_RETURN(0);
}

@@ -3956,11 +3978,12 @@ Ndb* check_ndb_in_thd(THD* thd)
int ha_ndbcluster::check_ndb_connection()
{
  THD* thd= current_thd;
  Ndb *ndb;
  DBUG_ENTER("check_ndb_connection");

  if (!(m_ndb= check_ndb_in_thd(thd)))
  if (!(ndb= check_ndb_in_thd(thd)))
    DBUG_RETURN(HA_ERR_NO_CONNECTION);
  m_ndb->setDatabaseName(m_dbname);
  ndb->setDatabaseName(m_dbname);
  DBUG_RETURN(0);
}

@@ -210,7 +210,6 @@ class ha_ndbcluster: public handler

  NdbConnection *m_active_trans;
  NdbResultSet *m_active_cursor;
  Ndb *m_ndb;
  void *m_table;
  void *m_table_info;
  char m_dbname[FN_HEADLEN];

@@ -246,6 +245,7 @@ class ha_ndbcluster: public handler
  bool m_transaction_on;
  bool m_use_local_query_cache;

  Ndb *get_ndb();
  void set_rec_per_key();
  void records_update();
  void no_uncommitted_rows_execute_failure();