BitKeeper/etc/ignore:
  auto-union
BitKeeper/etc/logging_ok:
  auto-union
sql-bench/server-cfg.sh:
  Auto merged
sql/sql_base.cc:
  Auto merged
sql/sql_insert.cc:
  Auto merged
sql/sql_table.cc:
  Auto merged
unknown
2001-06-12 14:12:35 +02:00
60 changed files with 2224 additions and 574 deletions


@ -314,11 +314,15 @@ sql-bench/Results-linux/ATIS-mysql_bdb-Linux_2.2.14_my_SMP_i686
sql-bench/bench-count-distinct
sql-bench/bench-init.pl
sql-bench/compare-results
sql-bench/compare-results-all
sql-bench/copy-db
sql-bench/crash-me
sql-bench/gif/*
sql-bench/graph-compare-results
sql-bench/output/*
sql-bench/run-all-tests
sql-bench/server-cfg
sql-bench/template.html
sql-bench/test-ATIS
sql-bench/test-alter-table
sql-bench/test-big-tables


@ -3,6 +3,7 @@
#shift
TO=dev-public@mysql.com
FROM=$USER@mysql.com
INTERNALS=internals@lists.mysql.com
LIMIT=10000
if [ "$REAL_EMAIL" = "" ]
@ -24,6 +25,23 @@ From: $FROM
To: $TO
Subject: bk commit - 4.0 tree
EOF
bk changes -v -r+
bk cset -r+ -d
) | head -n $LIMIT | /usr/sbin/sendmail -t
echo "Notifying internals list at $INTERNALS"
(
cat <<EOF
List-ID: <bk.mysql>
From: $FROM
To: $INTERNALS
Subject: bk commit into 3.23 tree
Below is the list of changes that have just been pushed into main
3.23. repository. For information on how to access the repository
see http://www.mysql.com/doc/I/n/Installing_source_tree.html
>>>>>>> BitKeeper/tmp/post-commit_sasha@1.8.1.3
EOF
bk changes -v -r+
bk cset -r+ -d


@ -4,10 +4,10 @@ use Getopt::Long;
$opt_distribution=$opt_user=$opt_result=$opt_config_options=$opt_config_env="";
$opt_dbd_options=$opt_perl_options=$opt_suffix="";
$opt_tmp=$version_suffix="";
$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=$opt_use_old_distribution=0;
$opt_help=$opt_Information=$opt_no_delete=$opt_debug=$opt_stage=$opt_rsh_mail=$opt_no_test=$opt_no_perl=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_no_mysqltest=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=$opt_no_strip=0;
$opt_innodb=$opt_bdb=0;
GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i","innodb","bdb","use-old-distribution") || usage();
GetOptions("Information","help","distribution=s","user=s","result=s","no-delete","no-test","no-mysqltest","perl-files=s","debug","config-options=s","config-env=s","stage=i","rsh-mail","with-low-memory","fast-benchmark","tmp=s","static-client","static-server","static-perl","no-perl","local-perl","perl-options=s","sur","with-small-disk","dbd-options=s","tcpip","suffix=s","build-thread=i","innodb","bdb","use-old-distribution","enable-shared","no-crash-me","no-strip") || usage();
usage() if ($opt_help || $opt_Information);
usage() if (!$opt_distribution);
@ -19,7 +19,7 @@ if ($opt_innodb || $opt_bdb)
chomp($host=`hostname`);
$full_host_name=$host;
print "$host: Compiling MySQL$version_suffix at $host$suffix, stage: $opt_stage\n" if ($opt_debug);
info("Compiling MySQL$version_suffix at $host$suffix, stage: $opt_stage\n");
$connect_option= ($opt_tcpip ? "--host=$host" : "");
$host =~ /^([^.-]*)/;
$host=$1 . $opt_suffix;
@ -119,7 +119,10 @@ if ($opt_stage <= 1)
{
$opt_config_options.=" --with-client-ldflags=-all-static";
}
$opt_config_options.= " --disable-shared"; # Default for binary versions
if (!$opt_enable_shared)
{
$opt_config_options.= " --disable-shared"; # Default for binary versions
}
if ($opt_bdb)
{
$opt_config_options.= " --with-berkeley-db"
@ -146,10 +149,13 @@ if ($opt_stage <= 2)
#
if ($opt_stage <= 3)
{
my ($flags);
log_system("rm -fr mysql-3* mysql-4* $pwd/$host/*.tar.gz");
log_system("nm -n sql/mysqld | gzip -9 -v 2>&1 > sql/mysqld.sym.gz | cat");
log_system("strip sql/mysqld extra/comp_err client/mysql sql/mysqld client/mysqlshow extra/replace isam/isamchk client/mysqladmin client/mysqldump extra/perror");
check_system("scripts/make_binary_distribution $opt_tmp $opt_suffix",".tar.gz created");
$flags="";
$flags.="--no-strip" if ($opt_no_strip);
check_system("scripts/make_binary_distribution --tmp=$opt_tmp --suffix=$opt_suffix $flags",".tar.gz created");
safe_system("mv mysql*.tar.gz $pwd/$host");
safe_system("cp client/mysqladmin $pwd/$host/bin");
safe_system("$make clean") if ($opt_with_small_disk);
@ -174,6 +180,7 @@ if ($opt_stage <= 4 && !$opt_no_test)
$tar_file =~ /(mysql-[^\/]*)\.tar/;
$ver=$1;
$test_dir="$pwd/$host/test/$ver";
$ENV{"LD_LIBRARY_PATH"}= "$test_dir/lib:" . $ENV{"LD_LIBRARY_PATH"};
if ($opt_stage <= 5 && !$opt_no_test && !$opt_no_mysqltest)
{
@ -237,7 +244,7 @@ if ($opt_stage <= 7 && $opt_perl_files && !$opt_no_perl && !$opt_no_test)
}
if ($opt_stage <= 8 && !$opt_no_test)
if ($opt_stage <= 8 && !$opt_no_test && !$opt_no_crash_me)
{
safe_cd("$test_dir/sql-bench");
log_system("rm -f limits/mysql.cfg");

File diff suppressed because it is too large.


@ -16,7 +16,7 @@
/* Show databases, tables or columns */
#define SHOW_VERSION "8.2"
#define SHOW_VERSION "8.3"
#include <global.h>
#include <my_sys.h>
@ -30,6 +30,7 @@
static my_string host=0,opt_password=0,user=0;
static my_bool opt_show_keys=0,opt_compress=0,opt_status=0;
static uint opt_verbose=0;
static void get_options(int *argc,char ***argv);
static uint opt_mysql_port=0;
@ -140,6 +141,7 @@ static struct option long_options[] =
#ifndef DONT_ALLOW_USER_CHANGE
{"user", required_argument, 0, 'u'},
#endif
{"verbose", no_argument, 0, 'v'},
{"version", no_argument, 0, 'V'},
{0, 0, 0, 0}
};
@ -181,6 +183,8 @@ static void usage(void)
-u, --user=# user for login if not current user\n");
#endif
printf("\
-v, --verbose more verbose output; You can use this multiple times\n\
to get even more verbose output.\n\
-V, --version output version information and exit\n");
puts("\n\
@ -200,7 +204,7 @@ get_options(int *argc,char ***argv)
int c,option_index;
my_bool tty_password=0;
while ((c=getopt_long(*argc,*argv,"c:h:p::u:#::P:S:Ck?VWi",long_options,
while ((c=getopt_long(*argc,*argv,"c:h:p::u:#::P:S:Ck?vVWi",long_options,
&option_index)) != EOF)
{
switch(c) {
@ -210,6 +214,9 @@ get_options(int *argc,char ***argv)
case 'c':
charsets_dir= optarg;
break;
case 'v':
opt_verbose++;
break;
case 'h':
host = optarg;
break;
@ -277,10 +284,13 @@ static int
list_dbs(MYSQL *mysql,const char *wild)
{
const char *header;
uint length;
uint length, counter = 0;
ulong rowcount = 0L;
char tables[NAME_LEN+1], rows[NAME_LEN+1];
char query[255];
MYSQL_FIELD *field;
MYSQL_RES *result;
MYSQL_ROW row;
MYSQL_ROW row, trow, rrow;
if (!(result=mysql_list_dbs(mysql,wild)))
{
@ -297,10 +307,79 @@ list_dbs(MYSQL *mysql,const char *wild)
if (length < field->max_length)
length=field->max_length;
print_header(header,length,NullS);
if (!opt_verbose)
print_header(header,length,NullS);
else if (opt_verbose == 1)
print_header(header,length,"Tables",6,NullS);
else
print_header(header,length,"Tables",6,"Total Rows",12,NullS);
while ((row = mysql_fetch_row(result)))
print_row(row[0],length,0);
print_trailer(length,0);
{
counter++;
if (opt_verbose)
{
/*
* Original code by MG16373; Slightly modified by Monty.
* Print now the count of tables and rows for each database.
*/
if (!(mysql_select_db(mysql,row[0])))
{
MYSQL_RES *tresult = mysql_list_tables(mysql,(char*)NULL);
if (mysql_affected_rows(mysql) > 0)
{
sprintf(tables,"%6lu",(ulong) mysql_affected_rows(mysql));
rowcount = 0;
if (opt_verbose > 1)
{
while ((trow = mysql_fetch_row(tresult)))
{
sprintf(query,"SELECT COUNT(*) FROM `%s`",trow[0]);
if (!(mysql_query(mysql,query)))
{
MYSQL_RES *rresult;
if ((rresult = mysql_store_result(mysql)))
{
rrow = mysql_fetch_row(rresult);
rowcount += (ulong) strtoull(rrow[0], (char**) 0, 10);
mysql_free_result(rresult);
}
}
}
sprintf(rows,"%12lu",rowcount);
}
}
else
{
sprintf(tables,"%6d",0);
sprintf(rows,"%12d",0);
}
mysql_free_result(tresult);
}
else
{
strmov(tables,"N/A");
strmov(rows,"N/A");
}
}
if (!opt_verbose)
print_row(row[0],length,0);
else if (opt_verbose == 1)
print_row(row[0],length,tables,6,NullS);
else
print_row(row[0],length,tables,6,rows,12,NullS);
}
print_trailer(length,
(opt_verbose > 0 ? 6 : 0),
(opt_verbose > 1 ? 12 :0),
0);
if (counter && opt_verbose)
printf("%u row%s in set.\n",counter,(counter > 1) ? "s" : "");
mysql_free_result(result);
return 0;
}
@ -310,10 +389,11 @@ static int
list_tables(MYSQL *mysql,const char *db,const char *table)
{
const char *header;
uint head_length;
uint head_length, counter = 0;
char query[255], rows[64], fields[16];
MYSQL_FIELD *field;
MYSQL_RES *result;
MYSQL_ROW row;
MYSQL_ROW row, rrow;
if (mysql_select_db(mysql,db))
{
@ -338,14 +418,81 @@ list_tables(MYSQL *mysql,const char *db,const char *table)
if (head_length < field->max_length)
head_length=field->max_length;
print_header(header,head_length,NullS);
if (!opt_verbose)
print_header(header,head_length,NullS);
else if (opt_verbose == 1)
print_header(header,head_length,"Columns",8,NullS);
else
print_header(header,head_length,"Columns",8, "Total Rows",10,NullS);
while ((row = mysql_fetch_row(result)))
print_row(row[0],head_length,0);
print_trailer(head_length,0);
{
/*
* Modified by MG16373
* Print now the count of rows for each table.
*/
counter++;
if (opt_verbose > 0)
{
if (!(mysql_select_db(mysql,db)))
{
MYSQL_RES *rresult = mysql_list_fields(mysql,row[0],NULL);
ulong rowcount=0L;
if (!rresult)
{
strmov(fields,"N/A");
strmov(rows,"N/A");
}
else
{
sprintf(fields,"%8u",(uint) mysql_num_fields(rresult));
mysql_free_result(rresult);
if (opt_verbose > 1)
{
sprintf(query,"SELECT COUNT(*) FROM `%s`",row[0]);
if (!(mysql_query(mysql,query)))
{
if ((rresult = mysql_store_result(mysql)))
{
rrow = mysql_fetch_row(rresult);
rowcount += (unsigned long) strtoull(rrow[0], (char**) 0, 10);
mysql_free_result(rresult);
}
sprintf(rows,"%10lu",rowcount);
}
else
sprintf(rows,"%10d",0);
}
}
}
else
{
strmov(fields,"N/A");
strmov(rows,"N/A");
}
}
if (!opt_verbose)
print_row(row[0],head_length,NullS);
else if (opt_verbose == 1)
print_row(row[0],head_length, fields,8, NullS);
else
print_row(row[0],head_length, fields,8, rows,10, NullS);
}
print_trailer(head_length,
(opt_verbose > 0 ? 8 : 0),
(opt_verbose > 1 ? 10 :0),
0);
if (counter && opt_verbose)
printf("%u row%s in set.\n\n",counter,(counter > 1) ? "s" : "");
mysql_free_result(result);
return 0;
}
static int
list_table_status(MYSQL *mysql,const char *db,const char *wild)
{
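
A minimal usage sketch of the new --verbose switch added above; the host and
credentials are placeholders, and the extra columns are the ones named in this diff:

  mysqlshow -u root -p -v          # one -v adds a "Tables" count per database ("Columns" per table)
  mysqlshow -u root -p -v -v test  # a second -v also adds "Total Rows", computed via SELECT COUNT(*)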


@ -31,7 +31,8 @@ struct mem_area_struct{
};
/* Each memory area takes this many extra bytes for control information */
#define MEM_AREA_EXTRA_SIZE (sizeof(struct mem_area_struct))
#define MEM_AREA_EXTRA_SIZE (ut_calc_align(sizeof(struct mem_area_struct),\
UNIV_MEM_ALIGNMENT))
/************************************************************************
Creates a memory pool. */


@ -171,10 +171,10 @@ page_cur_search(
ut_ad(dtuple_check_typed(tuple));
page_cur_search_with_match(page, tuple, mode,
&low_matched_fields,
&low_matched_bytes,
&up_matched_fields,
&up_matched_bytes,
&low_matched_fields,
&low_matched_bytes,
cursor);
return(low_matched_fields);
}


@ -2207,11 +2207,11 @@ row_sel_get_clust_rec_for_mysql(
visit through secondary index records that would not really
exist in our snapshot. */
if ((old_vers || rec_get_deleted_flag(rec))
if (clust_rec && (old_vers || rec_get_deleted_flag(rec))
&& !row_sel_sec_rec_is_for_clust_rec(rec, sec_index,
clust_rec, clust_index)) {
clust_rec = NULL;
}
}
}
*out_rec = clust_rec;


@ -176,7 +176,7 @@ trx_rollback_all_without_sess(void)
if (UT_LIST_GET_FIRST(trx_sys->trx_list)) {
fprintf(stderr,
"Innobase: Starting rollback of uncommitted transactions\n");
"InnoDB: Starting rollback of uncommitted transactions\n");
} else {
return;
}
@ -196,7 +196,7 @@ loop:
if (trx == NULL) {
fprintf(stderr,
"Innobase: Rollback of uncommitted transactions completed\n");
"InnoDB: Rollback of uncommitted transactions completed\n");
mem_heap_free(heap);
@ -221,7 +221,7 @@ loop:
ut_a(thr == que_fork_start_command(fork, SESS_COMM_EXECUTE, 0));
fprintf(stderr, "Innobase: Rolling back trx no %lu\n",
fprintf(stderr, "InnoDB: Rolling back trx no %lu\n",
ut_dulint_get_low(trx->id));
mutex_exit(&kernel_mutex);
@ -238,7 +238,7 @@ loop:
mutex_exit(&kernel_mutex);
fprintf(stderr,
"Innobase: Waiting rollback of trx no %lu to end\n",
"InnoDB: Waiting rollback of trx no %lu to end\n",
ut_dulint_get_low(trx->id));
os_thread_sleep(100000);
@ -264,7 +264,7 @@ loop:
mutex_exit(&(dict_sys->mutex));
}
fprintf(stderr, "Innobase: Rolling back of trx no %lu completed\n",
fprintf(stderr, "InnoDB: Rolling back of trx no %lu completed\n",
ut_dulint_get_low(trx->id));
mem_heap_free(heap);


@ -198,7 +198,7 @@ trx_sys_init_at_db_start(void)
if (UT_LIST_GET_LEN(trx_sys->trx_list) > 0) {
fprintf(stderr,
"Innobase: %lu uncommitted transaction(s) which must be rolled back\n",
"InnoDB: %lu uncommitted transaction(s) which must be rolled back\n",
UT_LIST_GET_LEN(trx_sys->trx_list));
}


@ -954,7 +954,7 @@ static int _nisam_cmp_buffer(File file, const byte *buff, ulong filepos, uint le
{
if (my_read(file,temp_buff,next_length,MYF(MY_NABP)))
goto err;
if (memcmp((byte*) buff,temp_buff,IO_SIZE))
if (memcmp((byte*) buff,temp_buff,next_length))
DBUG_RETURN(1);
buff+=next_length;
length-= next_length;


@ -1221,20 +1221,19 @@ static int _mi_cmp_buffer(File file, const byte *buff, my_off_t filepos,
char temp_buff[IO_SIZE*2];
DBUG_ENTER("_mi_cmp_buffer");
VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));
next_length= IO_SIZE*2 - (uint) (filepos & (IO_SIZE-1));
while (length > IO_SIZE*2)
{
if (my_read(file,temp_buff,next_length,MYF(MY_NABP)))
if (my_pread(file,temp_buff,next_length,filepos, MYF(MY_NABP)) ||
memcmp((byte*) buff,temp_buff,next_length))
goto err;
if (memcmp((byte*) buff,temp_buff,IO_SIZE))
DBUG_RETURN(1);
filepos+=next_length;
buff+=next_length;
length-= next_length;
next_length=IO_SIZE*2;
}
if (my_read(file,temp_buff,length,MYF(MY_NABP)))
if (my_pread(file,temp_buff,length,filepos,MYF(MY_NABP)))
goto err;
DBUG_RETURN(memcmp((byte*) buff,temp_buff,length));
err:


@ -1010,7 +1010,7 @@ uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BLOCK_INFO *info, File file,
{
ref_length=myisam->s->pack.ref_length;
/*
We can't use my_pread() here because mi_rad_pack_record assumes
We can't use my_pread() here because mi_read_rnd_pack_record assumes
position is ok
*/
VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0)));


@ -155,6 +155,10 @@ while test $# -gt 0; do
EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $1"
SLEEP_TIME=`$ECHO "$1" | $SED -e "s;--sleep=;;"`
;;
--mysqld=*)
TMP=`$ECHO "$1" | $SED -e "s;--mysqld=;;"`
EXTRA_MYSQL_TEST_OPT="$EXTRA_MYSQL_TEST_OPT $TMP"
;;
--gcov )
if [ x$BINARY_DIST = x1 ] ; then
$ECHO "Cannot do coverage test without the source - please use source dist"
@ -170,6 +174,7 @@ while test $# -gt 0; do
$ECHO "Note: you will get more meaningful output on a source distribution compiled with debugging option when running tests with --gdb option"
fi
DO_GDB=1
USE_RUNNING_SERVER=""
;;
--client-gdb )
if [ x$BINARY_DIST = x1 ] ; then
@ -182,6 +187,7 @@ while test $# -gt 0; do
$ECHO "Note: you will get more meaningful output on a source distribution compiled with debugging option when running tests with --ddd option"
fi
DO_DDD=1
USE_RUNNING_SERVER=""
;;
--skip-*)
EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT $1"


@ -27,3 +27,8 @@ n
12
Table Op Msg_type Msg_text
test.t1 optimize status OK
i
1
2
3
4


@ -0,0 +1,2 @@
Table Op Msg_type Msg_text
test.t1 check status OK


@ -8,3 +8,7 @@ b
1 10000000001
a$1 $b c$
1 2 3
table type possible_keys key key_len ref rows Extra
t2 ref B B 21 const 1 where used
a B
3 world


@ -1,7 +1,7 @@
@test @`select` @TEST @not_used
1 2 3 NULL
@test_int @test_double @test_string @test_string2 @select
10 0.00 abcdeghi abcdefghij NULL
10 1e-10 abcdeghi abcdefghij NULL
@test_int @test_double @test_string @test_string2
hello hello hello hello
@test_int @test_double @test_string @test_string2
@ -10,3 +10,5 @@ hellohello hellohello hellohello hellohello
NULL NULL NULL NULL
@t1:=(@t2:=1)+@t3:=4 @t1 @t2 @t3
5 5 1 4
@t5
1.23456


@ -71,7 +71,6 @@ ALTER TABLE t1 ADD Column new_col int not null;
UNLOCK TABLES;
OPTIMIZE TABLE t1;
DROP TABLE t1;
drop table if exists t1;
#
# ALTER TABLE ... ENABLE/DISABLE KEYS
@ -91,3 +90,13 @@ while ($1)
}
alter table t1 enable keys;
drop table t1;
#
# Drop and add an auto_increment column
#
create table t1 (i int unsigned not null auto_increment primary key);
insert into t1 values (null),(null),(null),(null);
alter table t1 drop i,add i int unsigned not null auto_increment, drop primary key, add primary key (i);
select * from t1;
drop table t1;

mysql-test/t/check.test (new file)

@ -0,0 +1,18 @@
connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
connection con1;
drop table if exists t1;
#add a lot of keys to slow down check
create table t1(n int not null, key(n), key(n), key(n), key(n));
let $1=10000;
while ($1)
{
eval insert into t1 values ($1);
dec $1;
}
send check table t1 type=extended;
connection con2;
insert into t1 values (200000);
connection con1;
reap;


@ -2,6 +2,7 @@
# Check some special create statements.
#
drop table if exists t1,t2;
create table t1 (b char(0));
insert into t1 values (""),(null);
select * from t1;
@ -57,3 +58,14 @@ select a$1, $b, c$ from test_$1.$test1;
create table test_$1.test2$ (a int);
drop table test_$1.test2$;
drop database test_$1;
#
# Test of CREATE ... SELECT with indexes
#
create table t1 (a int auto_increment not null primary key, B CHAR(20));
insert into t1 (b) values ("hello"),("my"),("world");
create table t2 (key (b)) select * from t1;
explain select * from t2 where b="world";
select * from t2 where b="world";
drop table t1,t2;


@ -1,7 +1,7 @@
#
# test variables
#
set @`test`=1,@TEST=3,@select=2;
set @`test`=1,@TEST=3,@select=2,@t5=1.23456;
select @test,@`select`,@TEST,@not_used;
set @test_int=10,@test_double=1e-10,@test_string="abcdeghi",@test_string2="abcdefghij",@select=NULL;
select @test_int,@test_double,@test_string,@test_string2,@select;
@ -12,3 +12,5 @@ select @test_int,@test_double,@test_string,@test_string2;
set @test_int=null,@test_double=null,@test_string=null,@test_string2=null;
select @test_int,@test_double,@test_string,@test_string2;
select @t1:=(@t2:=1)+@t3:=4,@t1,@t2,@t3;
select @t5;


@ -66,11 +66,11 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset,
my_filename(Filedes),my_errno);
}
if ((int) readbytes == -1 || (MyFlags & (MY_FNABP | MY_NABP)))
DBUG_RETURN(MY_FILE_ERROR); /* Return with error */
DBUG_RETURN(MY_FILE_ERROR); /* Return with error */
}
if (MyFlags & (MY_NABP | MY_FNABP))
DBUG_RETURN(0); /* Ok vid l{sning */
DBUG_RETURN(readbytes); /* purecov: inspected */
DBUG_RETURN(0); /* Read went ok; Return 0 */
DBUG_RETURN(readbytes); /* purecov: inspected */
}
} /* my_pread */


@ -11,30 +11,29 @@ export machine system version
SOURCE=`pwd`
CP="cp -p"
# Debug option must come first
STRIP=1
DEBUG=0
if test x$1 = x"--debug"
then
DEBUG=1
shift 1
fi
# Save temporary distribution here (must be full path)
SILENT=0
TMP=/tmp
if test $# -gt 0
then
TMP=$1
shift 1
fi
# Get optional suffix for distribution
SUFFIX=""
if test $# -gt 0
then
SUFFIX=$1
shift 1
fi
parse_arguments() {
for arg do
case "$arg" in
--debug) DEBUG=1;;
--tmp=*) TMP=`echo "$arg" | sed -e "s;--tmp=;;"` ;;
--suffix=*) SUFFIX=`echo "$arg" | sed -e "s;--suffix=;;"` ;;
--no-strip) STRIP=0 ;;
--silent) SILENT=1 ;;
*)
echo "Unknown argument '$arg'"
exit 1
;;
esac
done
}
parse_arguments "$@"
#make
@ -68,14 +67,18 @@ for i in extra/comp_err extra/replace extra/perror extra/resolveip \
client/mysql sql/mysqld client/mysqlshow client/mysqlcheck \
client/mysqladmin client/mysqldump client/mysqlimport client/mysqltest \
client/.libs/mysql client/.libs/mysqlshow client/.libs/mysqladmin \
client/.libs/mysqldump client/.libs/mysqlimport client/.libs/mysqltest
client/.libs/mysqldump client/.libs/mysqlimport client/.libs/mysqltest \
client/.libs/mysqlcheck
do
if [ -f $i ]
then
$CP $i $BASE/bin
fi
done
strip $BASE/bin/*
if [ x$STRIP = x1 ] ; then
strip $BASE/bin/*
fi
for i in sql/mysqld.sym.gz
do
@ -190,7 +193,13 @@ fi
echo "Using $tar to create archive"
cd $TMP
$tar cvf $SOURCE/$NEW_NAME.tar $NEW_NAME
OPT=cvf
if [ x$SILENT = x1 ] ; then
OPT=cf
fi
$tar $OPT $SOURCE/$NEW_NAME.tar $NEW_NAME
cd $SOURCE
echo "Compressing archive"
gzip -9 $NEW_NAME.tar


@ -1,17 +1,24 @@
# This file describes how to run MySQL benchmarks with PostgreSQL
# This file describes how to run MySQL benchmark suite with PostgreSQL
#
# WARNING:
#
# Don't run the --fast test on a PostgreSQL 7.1.1 database on
# which you have any critical data; During one of our test runs
# PostgreSQL got a corrupted database and all data was destroyed!
# (When we tried to restart postmaster, It died with a
# When we tried to restart postmaster, It died with a
# 'no such file or directory' error and never recovered from that!
#
# Another time vacuum() filled our system disk, which had 6G free,
# while vacuuming a table of 60 M.
#
# We have sent a mail about this to the PostgreSQL mailing list, so
# the PostgreSQL developers should be aware of these problems and should
# hopefully fix this soon.
#
# WARNING
# The test was run on an Intel Xeon 2x 550 MHz machine with 1G memory,
# 9G hard disk. The OS is Suse 6.4, with Linux 2.2.14 compiled with SMP
# 9G hard disk. The OS is Suse 7.1, with Linux 2.4.0 compiled with SMP
# support
# Both the perl client and the database server are run
# on the same machine. No other cpu intensive process was used during
@ -73,8 +80,15 @@ make install
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql
# and a test where we do a vacuum() after each update.
# (The time for vacuum() is counted in the book-keeping() column)
# When running with --fast we run the following vacuum commands on
# the database between each major update of the tables:
# vacuum table
# or
# vacuum analyze
# vacuum
# The time for vacuum() is accounted for in the book-keeping() column, not
# in the test that updates the database.
run-all-tests --comment="Intel Xeon, 2x550 Mhz, 512M, pg started with -o -F" --user=postgres --server=pg --cmp=mysql --fast
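
Roughly equivalent commands, shown here through a standard psql client for
illustration only (the database and table names are placeholders):

  psql -d bench -c "VACUUM bench1"          # vacuum a single table
  psql -d bench -c "VACUUM ANALYZE bench1"  # or vacuum with statistics update
  psql -d bench -c "VACUUM"                 # or vacuum the whole database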


@ -21,7 +21,7 @@ benchdir_root= $(prefix)
benchdir = $(benchdir_root)/sql-bench
bench_SCRIPTS = test-ATIS test-connect test-create test-insert \
test-big-tables test-select test-wisconsin \
test-alter-table \
test-alter-table graph-compare-results \
bench-init.pl compare-results run-all-tests \
server-cfg crash-me copy-db bench-count-distinct
CLEANFILES = $(bench_SCRIPTS)
@ -30,7 +30,7 @@ EXTRA_SCRIPTS = test-ATIS.sh test-connect.sh test-create.sh \
test-alter-table.sh test-wisconsin.sh \
bench-init.pl.sh compare-results.sh server-cfg.sh \
run-all-tests.sh crash-me.sh copy-db.sh \
bench-count-distinct.sh
bench-count-distinct.sh graph-compare-results.sh
EXTRA_DIST = $(EXTRA_SCRIPTS)
dist-hook:


@ -11,7 +11,7 @@ In this directory are the queries and raw data files used to populate
the MySQL benchmarks. In order to run the benchmarks you should normally
execute a command like the following:
run-all-tests --server=msyql --cmp=mysql,pg,solid --user=test --password=test --log
run-all-tests --server=mysql --cmp=mysql,pg,solid --user=test --password=test --log
The above means that one wants to run the benchmark with MySQL. The limits
should be taken from all of mysql,PostgreSQL and Solid. Login name and


@ -1,19 +0,0 @@
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:46:54
ATIS table test
Creating tables
Time for create_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (9768): 2 wallclock secs ( 0.49 usr 0.34 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data
Time for select_simple_join (500): 2 wallclock secs ( 0.63 usr 0.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_join (200): 15 wallclock secs ( 4.21 usr 2.20 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_distinct (800): 12 wallclock secs ( 1.70 usr 0.68 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_group (2600): 12 wallclock secs ( 1.43 usr 0.39 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Removing tables
Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 43 wallclock secs ( 8.46 usr 3.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)


@ -0,0 +1,20 @@
Testing server 'MySQL 3.23.39' at 2001-06-05 19:26:17
ATIS table test
Creating tables
Time for create_table (28): 0 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Inserting data
Time to insert (9768): 3 wallclock secs ( 0.45 usr 0.44 sys + 0.00 cusr 0.00 csys = 0.89 CPU)
Retrieving data
Time for select_simple_join (500): 3 wallclock secs ( 0.68 usr 0.19 sys + 0.00 cusr 0.00 csys = 0.87 CPU)
Time for select_join (100): 3 wallclock secs ( 0.51 usr 0.20 sys + 0.00 cusr 0.00 csys = 0.71 CPU)
Time for select_key_prefix_join (100): 13 wallclock secs ( 4.08 usr 2.01 sys + 0.00 cusr 0.00 csys = 6.09 CPU)
Time for select_distinct (800): 15 wallclock secs ( 1.75 usr 0.69 sys + 0.00 cusr 0.00 csys = 2.44 CPU)
Time for select_group (2600): 20 wallclock secs ( 1.57 usr 0.41 sys + 0.00 cusr 0.00 csys = 1.98 CPU)
Removing tables
Time to drop_table (28): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 57 wallclock secs ( 9.06 usr 3.94 sys + 0.00 cusr 0.00 csys = 13.00 CPU)


@ -1,75 +0,0 @@
Benchmark DBD suite: 2.9
Date of test: 2000-08-17 19:09:48
Running tests on: Linux 2.2.14-my-SMP i686
Arguments:
Comments: Intel Xeon, 2x550 Mhz, 1G ram, key_buffer=16M
Limits from: mysql,pg
Server version: MySQL 3.23.22 beta
ATIS: Total time: 43 wallclock secs ( 8.46 usr 3.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
alter-table: Total time: 260 wallclock secs ( 0.27 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
big-tables: Total time: 30 wallclock secs ( 8.19 usr 6.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
connect: Total time: 53 wallclock secs (26.25 usr 9.76 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
create: Total time: 121 wallclock secs ( 8.83 usr 3.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
insert: Total time: 1592 wallclock secs (254.20 usr 98.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
select: Total time: 1692 wallclock secs (111.29 usr 65.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
wisconsin: Total time: 16 wallclock secs ( 2.87 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
All 8 test executed successfully
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 252.00 0.20 0.02 0.00 992
connect 10.00 6.60 1.51 0.00 10000
connect+select_1_row 13.00 7.08 2.47 0.00 10000
connect+select_simple 13.00 7.36 2.24 0.00 10000
count 46.00 0.07 0.00 0.00 100
count_distinct 124.00 0.65 0.16 0.00 1000
count_distinct_big 623.00 69.07 56.00 0.00 1020
count_distinct_group 77.00 0.94 0.33 0.00 1000
count_distinct_group_on_key 64.00 0.37 0.07 0.00 1000
count_distinct_group_on_key_parts 77.00 0.93 0.45 0.00 1000
count_group_on_key_parts 61.00 1.09 0.27 0.00 1000
count_on_key 574.00 16.11 3.17 0.00 50100
create+drop 26.00 2.10 0.81 0.00 10000
create_MANY_tables 32.00 1.97 0.49 0.00 10000
create_index 4.00 0.00 0.00 0.00 8
create_key+drop 40.00 3.64 0.72 0.00 10000
create_table 0.00 0.00 0.00 0.00 31
delete_big 21.00 0.00 0.00 0.00 13
delete_big_many_keys 120.00 0.00 0.00 0.00 2
delete_key 4.00 0.50 0.47 0.00 10000
drop_index 4.00 0.00 0.00 0.00 8
drop_table 0.00 0.00 0.00 0.00 28
drop_table_when_MANY_tables 9.00 0.44 0.49 0.00 10000
insert 130.00 20.73 12.97 0.00 350768
insert_duplicates 113.00 18.31 11.27 0.00 300000
insert_key 159.00 8.91 4.08 0.00 100000
insert_many_fields 8.00 0.29 0.08 0.00 2000
min_max 31.00 0.03 0.00 0.00 60
min_max_on_key 213.00 25.00 4.86 0.00 85000
order_by 47.00 19.72 16.45 0.00 10
order_by_key 31.00 19.75 10.54 0.00 10
select_1_row 3.00 0.74 0.62 0.00 10000
select_2_rows 3.00 0.45 0.58 0.00 10000
select_big 37.00 23.09 11.64 0.00 10080
select_column+column 3.00 0.52 0.59 0.00 10000
select_diff_key 210.00 0.28 0.07 0.00 500
select_distinct 12.00 1.70 0.68 0.00 800
select_group 70.00 1.49 0.40 0.00 2711
select_group_when_MANY_tables 14.00 0.68 0.63 0.00 10000
select_join 15.00 4.21 2.20 0.00 200
select_key 129.00 66.05 14.03 0.00 200000
select_key_prefix 130.00 67.36 13.74 0.00 200000
select_many_fields 22.00 7.89 6.66 0.00 2000
select_range 21.00 7.00 1.72 0.00 25420
select_range_prefix 18.00 6.07 1.50 0.00 25010
select_simple 2.00 0.52 0.49 0.00 10000
select_simple_join 2.00 0.63 0.32 0.00 500
update_big 65.00 0.01 0.00 0.00 500
update_of_key 25.00 2.51 2.23 0.00 500
update_of_key_big 33.00 0.06 0.00 0.00 501
update_of_primary_key_many_keys 67.00 0.00 0.01 0.00 256
update_with_key 109.00 13.71 11.48 0.00 100000
wisc_benchmark 4.00 1.75 0.68 0.00 114
TOTALS 3920.00 438.58 200.19 0.00 1594242


@ -0,0 +1,101 @@
Benchmark DBD suite: 2.12
Date of test: 2001-06-05 19:27:31
Running tests on: Linux 2.4.0-64GB-SMP i686
Arguments:
Comments: Intel Xeon, 2x550 Mhz, 512M, key_buffer=16M
Limits from: mysql,pg
Server version: MySQL 3.23.39
ATIS: Total time: 57 wallclock secs ( 9.06 usr 3.94 sys + 0.00 cusr 0.00 csys = 13.00 CPU)
alter-table: Total time: 271 wallclock secs ( 0.18 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.20 CPU)
big-tables: Total time: 33 wallclock secs ( 9.40 usr 7.64 sys + 0.00 cusr 0.00 csys = 17.04 CPU)
connect: Total time: 86 wallclock secs (33.98 usr 18.10 sys + 0.00 cusr 0.00 csys = 52.08 CPU)
create: Total time: 103 wallclock secs ( 7.83 usr 3.60 sys + 0.00 cusr 0.00 csys = 11.43 CPU)
insert: Total time: 2736 wallclock secs (661.21 usr 182.47 sys + 0.00 cusr 0.00 csys = 843.68 CPU)
select: Total time: 1949 wallclock secs (70.03 usr 16.42 sys + 0.00 cusr 0.00 csys = 86.45 CPU)
wisconsin: Total time: 19 wallclock secs ( 3.92 usr 1.70 sys + 0.00 cusr 0.00 csys = 5.62 CPU)
All 8 test executed successfully
Totals per operation:
Operation seconds usr sys cpu tests
alter_table_add 261.00 0.13 0.02 0.15 992
connect 16.00 6.84 2.50 9.34 10000
connect+select_1_row 15.00 7.11 3.70 10.81 10000
connect+select_simple 13.00 6.70 3.21 9.91 10000
count 45.00 0.01 0.00 0.01 100
count_distinct 60.00 0.42 0.08 0.50 1000
count_distinct_2 63.00 0.18 0.03 0.21 1000
count_distinct_big 165.00 7.78 3.16 10.94 120
count_distinct_group 194.00 1.21 0.37 1.58 1000
count_distinct_group_on_key 59.00 0.51 0.07 0.58 1000
count_distinct_group_on_key_parts 194.00 1.12 0.46 1.58 1000
count_distinct_key_prefix 51.00 0.45 0.08 0.53 1000
count_group_on_key_parts 58.00 1.16 0.35 1.51 1000
count_on_key 586.00 16.61 2.71 19.32 50100
create+drop 33.00 2.94 0.82 3.76 10000
create_MANY_tables 18.00 1.02 0.62 1.64 5000
create_index 5.00 0.00 0.00 0.00 8
create_key+drop 41.00 3.05 0.66 3.71 10000
create_table 0.00 0.01 0.00 0.01 31
delete_all 17.00 0.00 0.00 0.00 12
delete_all_many_keys 75.00 0.03 0.00 0.03 1
delete_big 1.00 0.00 0.00 0.00 1
delete_big_many_keys 75.00 0.03 0.00 0.03 128
delete_key 4.00 0.76 0.29 1.05 10000
drop_index 5.00 0.00 0.00 0.00 8
drop_table 0.00 0.00 0.00 0.00 28
drop_table_when_MANY_tables 6.00 0.37 0.63 1.00 5000
insert 144.00 24.06 14.28 38.34 350768
insert_duplicates 31.00 5.06 3.72 8.78 100000
insert_key 137.00 9.91 6.26 16.17 100000
insert_many_fields 10.00 0.54 0.08 0.62 2000
insert_select_1_key 7.00 0.00 0.00 0.00 1
insert_select_2_keys 9.00 0.00 0.00 0.00 1
min_max 30.00 0.04 0.01 0.05 60
min_max_on_key 230.00 28.28 4.43 32.71 85000
order_by_big 78.00 22.39 9.83 32.22 10
order_by_big_key 33.00 23.35 10.15 33.50 10
order_by_big_key2 32.00 22.53 9.81 32.34 10
order_by_big_key_desc 36.00 23.47 10.27 33.74 10
order_by_big_key_diff 74.00 22.66 9.76 32.42 10
order_by_big_key_prefix 33.00 22.18 9.81 31.99 10
order_by_key2_diff 9.00 1.30 0.85 2.15 500
order_by_key_prefix 4.00 0.97 0.57 1.54 500
order_by_range 8.00 1.26 0.49 1.75 500
outer_join 110.00 0.00 0.00 0.00 10
outer_join_found 107.00 0.00 0.00 0.00 10
outer_join_not_found 59.00 0.00 0.00 0.00 500
outer_join_on_key 60.00 0.00 0.00 0.00 10
select_1_row 3.00 0.81 0.69 1.50 10000
select_2_rows 3.00 0.67 0.63 1.30 10000
select_big 63.00 32.72 16.55 49.27 10080
select_column+column 4.00 0.52 0.46 0.98 10000
select_diff_key 193.00 0.32 0.04 0.36 500
select_distinct 15.00 1.75 0.69 2.44 800
select_group 75.00 1.59 0.45 2.04 2711
select_group_when_MANY_tables 5.00 0.43 0.87 1.30 5000
select_join 3.00 0.51 0.20 0.71 100
select_key 132.00 53.98 10.53 64.51 200000
select_key2 139.00 78.61 11.08 89.69 200000
select_key2_return_key 131.00 64.58 9.61 74.19 200000
select_key2_return_prim 134.00 72.33 11.34 83.67 200000
select_key_prefix 141.00 86.32 12.05 98.37 200000
select_key_prefix_join 13.00 4.08 2.01 6.09 100
select_key_return_key 125.00 59.92 12.00 71.92 200000
select_many_fields 23.00 8.85 7.55 16.40 2000
select_query_cache 120.00 3.67 0.53 4.20 10000
select_query_cache2 120.00 3.80 0.57 4.37 10000
select_range 201.00 9.05 3.95 13.00 410
select_range_key2 21.00 7.15 1.40 8.55 25010
select_range_prefix 22.00 6.55 1.40 7.95 25010
select_simple 2.00 0.54 0.49 1.03 10000
select_simple_join 3.00 0.68 0.19 0.87 500
update_big 64.00 0.00 0.00 0.00 10
update_of_key 25.00 2.62 1.44 4.06 50000
update_of_key_big 35.00 0.05 0.04 0.09 501
update_of_primary_key_many_keys 47.00 0.01 0.02 0.03 256
update_with_key 119.00 18.44 12.64 31.08 300000
update_with_key_prefix 36.00 6.23 3.85 10.08 100000
wisc_benchmark 5.00 2.33 0.52 2.85 114
TOTALS 5323.00 795.55 233.87 1029.42 2551551


@ -1,14 +0,0 @@
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:47:38
Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
Time for insert (1000) 0 wallclock secs ( 0.06 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for alter_table_add (992): 252 wallclock secs ( 0.20 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for create_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop_index (8): 4 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 260 wallclock secs ( 0.27 usr 0.09 sys + 0.00 cusr 0.00 csys = 0.00 CPU)


@ -0,0 +1,14 @@
Testing server 'MySQL 3.23.39' at 2001-06-05 13:47:22
Testing of ALTER TABLE
Testing with 1000 columns and 1000 rows in 20 steps
Insert data into the table
Time for insert (1000) 0 wallclock secs ( 0.05 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Time for alter_table_add (992): 261 wallclock secs ( 0.13 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.15 CPU)
Time for create_index (8): 5 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop_index (8): 5 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 271 wallclock secs ( 0.18 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.20 CPU)


@ -1,19 +0,0 @@
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:51:59
Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
Time to select_many_fields(1000): 9 wallclock secs ( 4.07 usr 3.17 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select all_fields from table with 1 record
Time to select_many_fields(1000): 13 wallclock secs ( 3.82 usr 3.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert VALUES()
Time to insert_many_fields(1000): 3 wallclock secs ( 0.23 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing insert (all_fields) VALUES()
Time to insert_many_fields(1000): 5 wallclock secs ( 0.06 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 30 wallclock secs ( 8.19 usr 6.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU)


@ -0,0 +1,19 @@
Testing server 'MySQL 3.23.39' at 2001-06-05 13:51:53
Testing of some unusual tables
All tests are done 1000 times with 1000 fields
Testing table with 1000 fields
Testing select * from table with 1 record
Time to select_many_fields(1000): 10 wallclock secs ( 4.43 usr 4.17 sys + 0.00 cusr 0.00 csys = 8.60 CPU)
Testing select all_fields from table with 1 record
Time to select_many_fields(1000): 13 wallclock secs ( 4.42 usr 3.38 sys + 0.00 cusr 0.00 csys = 7.80 CPU)
Testing insert VALUES()
Time to insert_many_fields(1000): 3 wallclock secs ( 0.46 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.49 CPU)
Testing insert (all_fields) VALUES()
Time to insert_many_fields(1000): 7 wallclock secs ( 0.08 usr 0.05 sys + 0.00 cusr 0.00 csys = 0.13 CPU)
Total time: 33 wallclock secs ( 9.40 usr 7.64 sys + 0.00 cusr 0.00 csys = 17.04 CPU)


@ -1,30 +0,0 @@
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:52:30
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 10 wallclock secs ( 6.60 usr 1.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test connect/simple select/disconnect
Time for connect+select_simple (10000): 13 wallclock secs ( 7.36 usr 2.24 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test simple select
Time for select_simple (10000): 2 wallclock secs ( 0.52 usr 0.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing connect/select 1 row from table/disconnect
Time to connect+select_1_row (10000): 13 wallclock secs ( 7.08 usr 2.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 1 row from table
Time to select_1_row (10000): 3 wallclock secs ( 0.74 usr 0.62 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing select 2 rows from table
Time to select_2_rows (10000): 3 wallclock secs ( 0.45 usr 0.58 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test select with aritmetic (+)
Time for select_column+column (10000): 3 wallclock secs ( 0.52 usr 0.59 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing retrieval of big records (7000 bytes)
Time to select_big (10000): 6 wallclock secs ( 2.98 usr 1.26 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 53 wallclock secs (26.25 usr 9.76 sys + 0.00 cusr 0.00 csys = 0.00 CPU)


@ -0,0 +1,30 @@
Testing server 'MySQL 3.23.39' at 2001-06-05 13:52:26
Testing the speed of connecting to the server and sending of data
All tests are done 10000 times
Testing connection/disconnect
Time to connect (10000): 16 wallclock secs ( 6.84 usr 2.50 sys + 0.00 cusr 0.00 csys = 9.34 CPU)
Test connect/simple select/disconnect
Time for connect+select_simple (10000): 13 wallclock secs ( 6.70 usr 3.21 sys + 0.00 cusr 0.00 csys = 9.91 CPU)
Test simple select
Time for select_simple (10000): 2 wallclock secs ( 0.54 usr 0.49 sys + 0.00 cusr 0.00 csys = 1.03 CPU)
Testing connect/select 1 row from table/disconnect
Time to connect+select_1_row (10000): 15 wallclock secs ( 7.11 usr 3.70 sys + 0.00 cusr 0.00 csys = 10.81 CPU)
Testing select 1 row from table
Time to select_1_row (10000): 3 wallclock secs ( 0.81 usr 0.69 sys + 0.00 cusr 0.00 csys = 1.50 CPU)
Testing select 2 rows from table
Time to select_2_rows (10000): 3 wallclock secs ( 0.67 usr 0.63 sys + 0.00 cusr 0.00 csys = 1.30 CPU)
Test select with aritmetic (+)
Time for select_column+column (10000): 4 wallclock secs ( 0.52 usr 0.46 sys + 0.00 cusr 0.00 csys = 0.98 CPU)
Testing retrieval of big records (65000 bytes)
Time to select_big (10000): 30 wallclock secs (10.79 usr 6.41 sys + 0.00 cusr 0.00 csys = 17.20 CPU)
Total time: 86 wallclock secs (33.98 usr 18.10 sys + 0.00 cusr 0.00 csys = 52.08 CPU)


@ -1,18 +0,0 @@
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:53:24
Testing the speed of creating and droping tables
Testing with 10000 tables and 10000 loop count
Testing create of tables
Time for create_MANY_tables (10000): 32 wallclock secs ( 1.97 usr 0.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Accessing tables
Time to select_group_when_MANY_tables (10000): 14 wallclock secs ( 0.68 usr 0.63 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing drop
Time for drop_table_when_MANY_tables (10000): 9 wallclock secs ( 0.44 usr 0.49 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing create+drop
Time for create+drop (10000): 26 wallclock secs ( 2.10 usr 0.81 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for create_key+drop (10000): 40 wallclock secs ( 3.64 usr 0.72 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 121 wallclock secs ( 8.83 usr 3.14 sys + 0.00 cusr 0.00 csys = 0.00 CPU)


@ -0,0 +1,18 @@
Testing server 'MySQL 3.23.39' at 2001-06-05 13:53:52
Testing the speed of creating and droping tables
Testing with 5000 tables and 10000 loop count
Testing create of tables
Time for create_MANY_tables (5000): 18 wallclock secs ( 1.02 usr 0.62 sys + 0.00 cusr 0.00 csys = 1.64 CPU)
Accessing tables
Time to select_group_when_MANY_tables (5000): 5 wallclock secs ( 0.43 usr 0.87 sys + 0.00 cusr 0.00 csys = 1.30 CPU)
Testing drop
Time for drop_table_when_MANY_tables (5000): 6 wallclock secs ( 0.37 usr 0.63 sys + 0.00 cusr 0.00 csys = 1.00 CPU)
Testing create+drop
Time for create+drop (10000): 33 wallclock secs ( 2.94 usr 0.82 sys + 0.00 cusr 0.00 csys = 3.76 CPU)
Time for create_key+drop (10000): 41 wallclock secs ( 3.05 usr 0.66 sys + 0.00 cusr 0.00 csys = 3.71 CPU)
Total time: 103 wallclock secs ( 7.83 usr 3.60 sys + 0.00 cusr 0.00 csys = 11.43 CPU)


@ -1,58 +0,0 @@
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 17:55:26
Testing the speed of inserting data into 1 table and do some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 113 wallclock secs (18.31 usr 11.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for insert_duplicates (300000): 113 wallclock secs (18.31 usr 11.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Retrieving data from the table
Time for select_big (10:3000000): 30 wallclock secs (19.98 usr 10.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by_key (10:3000000): 31 wallclock secs (19.75 usr 10.54 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for order_by (10:3000000): 47 wallclock secs (19.72 usr 16.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_diff_key (500:1000): 210 wallclock secs ( 0.28 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range_prefix (5010:42084): 10 wallclock secs ( 2.48 usr 0.55 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (5010:42084): 11 wallclock secs ( 2.61 usr 0.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key_prefix (200000): 130 wallclock secs (67.36 usr 13.74 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_key (200000): 129 wallclock secs (66.05 usr 14.03 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Test of compares with simple ranges
Time for select_range_prefix (20000:43500): 8 wallclock secs ( 3.59 usr 0.95 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (20000:43500): 8 wallclock secs ( 3.74 usr 0.79 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_group (111): 58 wallclock secs ( 0.06 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max_on_key (15000): 8 wallclock secs ( 4.40 usr 0.88 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max (60): 31 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_on_key (100): 56 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count (100): 46 wallclock secs ( 0.07 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_big (20): 64 wallclock secs ( 0.03 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys with functions
Time for update_of_key (500): 25 wallclock secs ( 2.51 usr 2.23 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for update_of_key_big (501): 33 wallclock secs ( 0.06 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update with key
Time for update_with_key (100000): 109 wallclock secs (13.71 usr 11.48 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of all rows
Time for update_big (500): 65 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing delete
Time for delete_key (10000): 4 wallclock secs ( 0.50 usr 0.47 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for delete_big (12): 20 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 159 wallclock secs ( 8.91 usr 4.08 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing update of keys
Time for update_of_primary_key_many_keys (256): 67 wallclock secs ( 0.00 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Deleting everything from table
Time for delete_big_many_keys (2): 120 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 1592 wallclock secs (254.20 usr 98.51 sys + 0.00 cusr 0.00 csys = 0.00 CPU)


@ -0,0 +1,85 @@
Testing server 'MySQL 3.23.39' at 2001-06-05 13:55:36
Testing the speed of inserting data into 1 table and do some selects on it.
The tests are done with a table that has 100000 rows.
Generating random keys
Creating tables
Inserting 100000 rows in order
Inserting 100000 rows in reverse order
Inserting 100000 rows in random order
Time for insert (300000): 123 wallclock secs (21.22 usr 12.32 sys + 0.00 cusr 0.00 csys = 33.54 CPU)
Testing insert of duplicates
Time for insert_duplicates (100000): 31 wallclock secs ( 5.06 usr 3.72 sys + 0.00 cusr 0.00 csys = 8.78 CPU)
Retrieving data from the table
Time for select_big (10:3000000): 32 wallclock secs (21.78 usr 10.07 sys + 0.00 cusr 0.00 csys = 31.85 CPU)
Time for order_by_big_key (10:3000000): 33 wallclock secs (23.35 usr 10.15 sys + 0.00 cusr 0.00 csys = 33.50 CPU)
Time for order_by_big_key_desc (10:3000000): 36 wallclock secs (23.47 usr 10.27 sys + 0.00 cusr 0.00 csys = 33.74 CPU)
Time for order_by_big_key_prefix (10:3000000): 33 wallclock secs (22.18 usr 9.81 sys + 0.00 cusr 0.00 csys = 31.99 CPU)
Time for order_by_big_key2 (10:3000000): 32 wallclock secs (22.53 usr 9.81 sys + 0.00 cusr 0.00 csys = 32.34 CPU)
Time for order_by_big_key_diff (10:3000000): 74 wallclock secs (22.66 usr 9.76 sys + 0.00 cusr 0.00 csys = 32.42 CPU)
Time for order_by_big (10:3000000): 78 wallclock secs (22.39 usr 9.83 sys + 0.00 cusr 0.00 csys = 32.22 CPU)
Time for order_by_range (500:125750): 8 wallclock secs ( 1.26 usr 0.49 sys + 0.00 cusr 0.00 csys = 1.75 CPU)
Time for order_by_key_prefix (500:125750): 4 wallclock secs ( 0.97 usr 0.57 sys + 0.00 cusr 0.00 csys = 1.54 CPU)
Time for order_by_key2_diff (500:250500): 9 wallclock secs ( 1.30 usr 0.85 sys + 0.00 cusr 0.00 csys = 2.15 CPU)
Time for select_diff_key (500:1000): 193 wallclock secs ( 0.32 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.36 CPU)
Time for select_range_prefix (5010:42084): 13 wallclock secs ( 2.55 usr 0.51 sys + 0.00 cusr 0.00 csys = 3.06 CPU)
Time for select_range_key2 (5010:42084): 12 wallclock secs ( 2.81 usr 0.68 sys + 0.00 cusr 0.00 csys = 3.49 CPU)
Time for select_key_prefix (200000): 141 wallclock secs (86.32 usr 12.05 sys + 0.00 cusr 0.00 csys = 98.37 CPU)
Time for select_key (200000): 132 wallclock secs (53.98 usr 10.53 sys + 0.00 cusr 0.00 csys = 64.51 CPU)
Time for select_key_return_key (200000): 125 wallclock secs (59.92 usr 12.00 sys + 0.00 cusr 0.00 csys = 71.92 CPU)
Time for select_key2 (200000): 139 wallclock secs (78.61 usr 11.08 sys + 0.00 cusr 0.00 csys = 89.69 CPU)
Time for select_key2_return_key (200000): 131 wallclock secs (64.58 usr 9.61 sys + 0.00 cusr 0.00 csys = 74.19 CPU)
Time for select_key2_return_prim (200000): 134 wallclock secs (72.33 usr 11.34 sys + 0.00 cusr 0.00 csys = 83.67 CPU)
Test of compares with simple ranges
Time for select_range_prefix (20000:43500): 9 wallclock secs ( 4.00 usr 0.89 sys + 0.00 cusr 0.00 csys = 4.89 CPU)
Time for select_range_key2 (20000:43500): 9 wallclock secs ( 4.34 usr 0.72 sys + 0.00 cusr 0.00 csys = 5.06 CPU)
Time for select_group (111): 55 wallclock secs ( 0.02 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.06 CPU)
Time for min_max_on_key (15000): 8 wallclock secs ( 5.12 usr 0.76 sys + 0.00 cusr 0.00 csys = 5.88 CPU)
Time for min_max (60): 30 wallclock secs ( 0.04 usr 0.01 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Time for count_on_key (100): 52 wallclock secs ( 0.03 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.05 CPU)
Time for count (100): 45 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Time for count_distinct_big (20): 98 wallclock secs ( 0.01 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.01 CPU)
Testing update of keys with functions
Time for update_of_key (50000): 25 wallclock secs ( 2.62 usr 1.44 sys + 0.00 cusr 0.00 csys = 4.06 CPU)
Time for update_of_key_big (501): 35 wallclock secs ( 0.05 usr 0.04 sys + 0.00 cusr 0.00 csys = 0.09 CPU)
Testing update with key
Time for update_with_key (300000): 119 wallclock secs (18.44 usr 12.64 sys + 0.00 cusr 0.00 csys = 31.08 CPU)
Time for update_with_key_prefix (100000): 36 wallclock secs ( 6.23 usr 3.85 sys + 0.00 cusr 0.00 csys = 10.08 CPU)
Testing update of all rows
Time for update_big (10): 64 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing left outer join
Time for outer_join_on_key (10:10): 60 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join (10:10): 110 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_found (10:10): 107 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for outer_join_not_found (500:10): 59 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing INSERT INTO ... SELECT
Time for insert_select_1_key (1): 7 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for insert_select_2_keys (1): 9 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for drop table(2): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing delete
Time for delete_key (10000): 4 wallclock secs ( 0.76 usr 0.29 sys + 0.00 cusr 0.00 csys = 1.05 CPU)
Time for delete_all (12): 17 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Insert into table with 16 keys and with a primary key with 16 parts
Time for insert_key (100000): 137 wallclock secs ( 9.91 usr 6.26 sys + 0.00 cusr 0.00 csys = 16.17 CPU)
Testing update of keys
Time for update_of_primary_key_many_keys (256): 47 wallclock secs ( 0.01 usr 0.02 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Deleting rows from the table
Time for delete_big_many_keys (128): 75 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Deleting everything from table
Time for delete_all_many_keys (1): 75 wallclock secs ( 0.03 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.03 CPU)
Total time: 2736 wallclock secs (661.21 usr 182.47 sys + 0.00 cusr 0.00 csys = 843.68 CPU)


@ -1,23 +0,0 @@
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 18:22:00
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 12 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 4 wallclock secs ( 0.81 usr 0.43 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing big selects on the table
Time for select_big (70:17207): 1 wallclock secs ( 0.13 usr 0.06 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for select_range (410:75949): 2 wallclock secs ( 0.65 usr 0.32 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for min_max_on_key (70000): 205 wallclock secs (20.60 usr 3.98 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_on_key (50000): 518 wallclock secs (16.08 usr 3.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_group_on_key_parts (1000:0): 61 wallclock secs ( 1.09 usr 0.27 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Testing count(distinct) on the table
Time for count_distinct (1000:2000): 124 wallclock secs ( 0.65 usr 0.16 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_group_on_key (1000:6000): 64 wallclock secs ( 0.37 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_group_on_key_parts (1000:100000): 77 wallclock secs ( 0.93 usr 0.45 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_group (1000:100000): 77 wallclock secs ( 0.94 usr 0.33 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time for count_distinct_big (1000:10000000): 559 wallclock secs (69.04 usr 55.99 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 1692 wallclock secs (111.29 usr 65.22 sys + 0.00 cusr 0.00 csys = 0.00 CPU)


@ -0,0 +1,30 @@
Testing server 'MySQL 3.23.39' at 2001-06-05 14:41:13
Testing the speed of selecting on keys that consist of many parts
The test-table has 10000 rows and the test is done with 500 ranges.
Creating table
Inserting 10000 rows
Time to insert (10000): 5 wallclock secs ( 0.80 usr 0.34 sys + 0.00 cusr 0.00 csys = 1.14 CPU)
Test if the database has a query cache
Time for select_query_cache (10000): 120 wallclock secs ( 3.67 usr 0.53 sys + 0.00 cusr 0.00 csys = 4.20 CPU)
Time for select_query_cache2 (10000): 120 wallclock secs ( 3.80 usr 0.57 sys + 0.00 cusr 0.00 csys = 4.37 CPU)
Testing big selects on the table
Time for select_big (70:17207): 1 wallclock secs ( 0.15 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.22 CPU)
Time for select_range (410:1057904): 201 wallclock secs ( 9.05 usr 3.95 sys + 0.00 cusr 0.00 csys = 13.00 CPU)
Time for min_max_on_key (70000): 222 wallclock secs (23.16 usr 3.67 sys + 0.00 cusr 0.00 csys = 26.83 CPU)
Time for count_on_key (50000): 534 wallclock secs (16.58 usr 2.69 sys + 0.00 cusr 0.00 csys = 19.27 CPU)
Time for count_group_on_key_parts (1000:100000): 58 wallclock secs ( 1.16 usr 0.35 sys + 0.00 cusr 0.00 csys = 1.51 CPU)
Testing count(distinct) on the table
Time for count_distinct_key_prefix (1000:1000): 51 wallclock secs ( 0.45 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.53 CPU)
Time for count_distinct (1000:1000): 60 wallclock secs ( 0.42 usr 0.08 sys + 0.00 cusr 0.00 csys = 0.50 CPU)
Time for count_distinct_2 (1000:1000): 63 wallclock secs ( 0.18 usr 0.03 sys + 0.00 cusr 0.00 csys = 0.21 CPU)
Time for count_distinct_group_on_key (1000:6000): 59 wallclock secs ( 0.51 usr 0.07 sys + 0.00 cusr 0.00 csys = 0.58 CPU)
Time for count_distinct_group_on_key_parts (1000:100000): 194 wallclock secs ( 1.12 usr 0.46 sys + 0.00 cusr 0.00 csys = 1.58 CPU)
Time for count_distinct_group (1000:100000): 194 wallclock secs ( 1.21 usr 0.37 sys + 0.00 cusr 0.00 csys = 1.58 CPU)
Time for count_distinct_big (100:1000000): 67 wallclock secs ( 7.77 usr 3.16 sys + 0.00 cusr 0.00 csys = 10.93 CPU)
Total time: 1949 wallclock secs (70.03 usr 16.42 sys + 0.00 cusr 0.00 csys = 86.45 CPU)


@ -1,14 +0,0 @@
Testing server 'MySQL 3.23.22 beta' at 2000-08-17 18:50:12
Wisconsin benchmark test
Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (31000): 11 wallclock secs ( 1.12 usr 0.93 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Time to delete_big (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Running actual benchmark
Time for wisc_benchmark (114): 4 wallclock secs ( 1.75 usr 0.68 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Total time: 16 wallclock secs ( 2.87 usr 1.61 sys + 0.00 cusr 0.00 csys = 0.00 CPU)


@ -0,0 +1,14 @@
Testing server 'MySQL 3.23.39' at 2001-06-05 15:13:43
Wisconsin benchmark test
Time for create_table (3): 0 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Inserting data
Time to insert (31000): 13 wallclock secs ( 1.59 usr 1.18 sys + 0.00 cusr 0.00 csys = 2.77 CPU)
Time to delete_big (1): 1 wallclock secs ( 0.00 usr 0.00 sys + 0.00 cusr 0.00 csys = 0.00 CPU)
Running actual benchmark
Time for wisc_benchmark (114): 5 wallclock secs ( 2.33 usr 0.52 sys + 0.00 cusr 0.00 csys = 2.85 CPU)
Total time: 19 wallclock secs ( 3.92 usr 1.70 sys + 0.00 cusr 0.00 csys = 5.62 CPU)


@ -25,7 +25,7 @@ use Getopt::Long;
$opt_server="mysql";
$opt_dir="output";
$opt_machine="";
$opt_machine=$opt_cmp="";
$opt_relative=$opt_same_server=$opt_help=$opt_Information=$opt_skip_count=$opt_no_bars=$opt_verbose=0;
GetOptions("Information","help","server=s","cmp=s","machine=s","relative","same-server","dir=s","skip-count","no-bars","html","verbose") || usage();
@ -53,10 +53,6 @@ if ($#ARGV == -1)
@ARGV=glob($files);
$automatic_files=1;
}
else
{
$opt_cmp="";
}
foreach (@ARGV)
{

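The hunk above removes the else-branch that cleared $opt_cmp when result files were given explicitly on the command line. For context, here is a minimal sketch of how the RUN result files are selected when none are given, assuming RUN-<server>-<machine> style names in an output directory; the option defaults are illustrative only.

#!/usr/bin/perl
# Sketch of the RUN-file selection logic used by compare-results;
# option defaults and the directory name are illustrative.
use Getopt::Long;

$opt_server  = "mysql";
$opt_dir     = "output";
$opt_machine = $opt_cmp = "";
$opt_same_server = 0;
GetOptions("server=s","cmp=s","machine=s","same-server","dir=s") || die "Bad options\n";

# Either compare several runs of one server, or all RUN files for this machine
my $files = $opt_same_server
  ? "$opt_dir/RUN-$opt_server-*$opt_machine"
  : "$opt_dir/RUN-*$opt_machine";
$files .= "-cmp-$opt_cmp" if (length($opt_cmp));

@ARGV = glob($files) if ($#ARGV == -1);         # no files given: take all matches
print "Result files: @ARGV\n";
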
View File

@ -38,7 +38,7 @@
# as such, and clarify ones such as "mediumint" with comments such as
# "3-byte int" or "same as xxx".
$version="1.56";
$version="1.57";
use DBI;
use Getopt::Long;
@ -1539,12 +1539,24 @@ report("insert INTO ... SELECT ...","insert_select",
"insert into crash_q (a) SELECT crash_me.a from crash_me",
"drop table crash_q $drop_attr");
report_trans("transactions","transactions",
[create_table("crash_q",["a integer not null"],[]),
"insert into crash_q values (1)"],
"select * from crash_q",
"drop table crash_q $drop_attr"
);
if (!defined($limits{"transactions"}))
{
my ($limit,$type);
$limit="transactions";
print "$limit: ";
foreach $type (('', 'type=bdb', 'type=innodb', 'type=gemini'))
{
undef($limits{$limit});
last if (!report_trans($limit,
[create_table("crash_q",["a integer not null"],[],
$type),
"insert into crash_q values (1)"],
"select * from crash_q",
"drop table crash_q $drop_attr"
));
}
print "$limits{$limit}\n";
}
report("atomic updates","atomic_updates",
create_table("crash_q",["a integer not null"],["primary key (a)"]),
@ -2500,8 +2512,7 @@ sub report_result
sub report_trans
{
my ($prompt,$limit,$queries,$check,$clear)=@_;
print "$prompt: ";
my ($limit,$queries,$check,$clear)=@_;
if (!defined($limits{$limit}))
{
eval {undef($dbh->{AutoCommit})};
@ -2518,7 +2529,6 @@ sub report_trans
safe_query($clear);
} else {
$dbh->{AutoCommit} = 1;
safe_query($clear);
save_config_data($limit,"error",$prompt);
}
} else {
@ -2532,8 +2542,7 @@ sub report_trans
}
safe_query($clear);
}
print "$limits{$limit}\n";
return $limits{$limit} ne "no";
return $limits{$limit} ne "yes";
}
@ -2961,9 +2970,11 @@ sub sql_concat
sub create_table
{
my($table_name,$fields,$index) = @_;
my($table_name,$fields,$index,$extra) = @_;
my($query,$nr,$parts,@queries,@index);
$extra="" if (!defined($extra));
$query="create table $table_name (";
$nr=0;
foreach $field (@$fields)
@ -3015,7 +3026,7 @@ sub create_table
}
}
chop($query);
$query.= ')';
$query.= ") $extra";
unshift(@queries,$query);
return @queries;
}
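
The crash-me change above probes transaction support by trying each table type in turn ('type=bdb', 'type=innodb', 'type=gemini'), passing the clause through the new $extra argument of create_table. A minimal sketch of that idea follows, with a heavily simplified create_table (the real one also splits over-long statements into several queries); the table and column names are taken from the hunk, everything else is illustrative.

#!/usr/bin/perl
# Sketch of appending an optional table-type clause to a generated
# CREATE TABLE statement, as the new $extra argument above does.

sub create_table
{
  my ($table_name, $fields, $index, $extra) = @_;
  $extra = "" if (!defined($extra));
  my $query = "create table $table_name (";
  $query .= join(",", @$fields, @$index);
  $query .= ") $extra";
  return $query;
}

foreach my $type ('', 'type=bdb', 'type=innodb', 'type=gemini')
{
  print create_table("crash_q", ["a integer not null"], [], $type), "\n";
}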

View File

@ -0,0 +1,660 @@
####
#### Hello ... this is a heavily hacked script by Luuk
#### instead of printing the result it makes a nice gif
#### when you want to look at the code ... beware of the
#### ugliest code ever seen .... but it works ...
#### and that's sometimes the only thing you want ... isn't it ...
#### as the original script ... Hope you like it
####
#### Greetz..... Luuk de Boer 1997.
####
## if you want the seconds behind the bar printed or not ...
## or only the one where the bar is too big for the graph ...
## look at line 535 of this program and below ...
## look in sub calculate for almost all hard/soft settings :-)
# a little program to generate a table of results
# just read all the RUN-*.log files and format them nicely
# Made by Luuk de Boer
# Patched by Monty
use Getopt::Long;
use GD;
$opt_server="mysql";
$opt_cmp="mysql,pg,solid";
$opt_cmp="msql,mysql,pg,solid";
$opt_cmp="empress,mysql,pg,solid";
$opt_dir="output";
$opt_machine="";
$opt_relative=$opt_same_server=$opt_help=$opt_Information=$opt_skip_count=0;
GetOptions("Information","help","server=s","cmp=s","machine=s","relative","same-server","dir=s","skip-count") || usage();
usage() if ($opt_help || $opt_Information);
if ($opt_same_server)
{
$files="$opt_dir/RUN-$opt_server-*$opt_machine";
}
else
{
$files="$opt_dir/RUN-*$opt_machine";
}
$files.= "-cmp-$opt_cmp" if (length($opt_cmp));
$automatic_files=0;
if ($#ARGV == -1)
{
@ARGV=glob($files);
$automatic_files=1;
}
#
# Go through all RUN files and gather statistics.
#
foreach (@ARGV)
{
$filename = $_;
next if (defined($found{$_})); # remove duplicates
$found{$_}=1;
/RUN-(.*)$/;
$prog = $1;
push(@key_order,$prog);
$next = 0;
open(TMP, "<$filename") || die "Can't open $filename: $!\n";
while (<TMP>)
{
chomp;
if ($next == 0) {
if (/Server version:\s+(\S+.*)/i)
{
$tot{$prog}{'server'} = $1;
}
elsif (/Arguments:\s+(.+)/i)
{
$tot{$prog}{'arguments'} = $1;
# Remove some standard, not informative arguments
$tot{$prog}{'arguments'} =~ s/--log|--use-old-results|--server=\S+|--cmp=\S+|--user=\S+|--pass=\S+|--machine=\S+//g;
$tot{$prog}{'arguments'} =~ s/\s+/ /g;
}
elsif (/Comments:\s+(.+)/i) {
$tot{$prog}{'comments'} = $1;
} elsif (/^(\S+):\s*(estimated\s|)total\stime:\s+(\d+)\s+secs/i)
{
$tmp = $1; $tmp =~ s/://;
$tot{$prog}{$tmp} = [ $3, (length($2) ? "+" : "")];
$op1{$tmp} = $tmp;
} elsif (/Totals per operation:/i) {
$next = 1;
next;
}
}
elsif ($next == 1)
{
if (/^(\S+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s*([+|?])*/)
{
$tot1{$prog}{$1} = [$2,$6,$7];
$op{$1} = $1;
#print "TEST - $_ \n * $prog - $1 - $2 - $6 - $7 ****\n";
# $prog - filename
# $1 - operation
# $2 - time in secs
# $6 - number of loops
# $7 - nothing / + / ? / * => estimated time ...
# get the highest value ....
$highest = ($2/$6) if (($highest < ($2/$6)) && ($1 !~/TOTALS/i));
$gifcount++;
$giftotal += ($2/$6);
}
}
}
}
if (!%op)
{
print "Didn't find any files matching: '$files'\n";
print "Use the --cmp=server,server option to compare benchmarks\n";
exit 1;
}
# everything is loaded ...
# now we have to create a fancy output :-)
# I prefer to redirect scripts instead of forcing them to a file ; Monty
#
# open(RES, ">$resultfile") || die "Can't write to $resultfile: $!\n";
# select(RES)
#
#print <<EOF;
#<cut for this moment>
#
#EOF
if ($opt_relative)
{
# print "Column 1 is in seconds. All other columns are presented relative\n";
# print "to this. 1.00 is the same, bigger numbers indicates slower\n\n";
}
#print "The result logs which were found and the options:\n";
if ($automatic_files)
{
# move the entries matching the --server option to the front of the key order
for ($i = 0 ; $i <= $#key_order ; $i++)
{
if ($key_order[$i] =~ /^$opt_server/)
{
unshift(@key_order,$key_order[$i]);
splice(@key_order,$i+1,1);
}
}
}
# extra for mysql and mysql_pgcc
#$number1 = shift(@key_order);
#$number2 = shift(@key_order);
#unshift(@key_order,$number1);
#unshift(@key_order,$number2);
# Print header
$column_count=0;
foreach $key (@key_order)
{
$column_count++;
# printf "%2d %-40.40s: %s %s\n", $column_count, $key,
# $tot{$key}{'server'}, $tot{$key}{'arguments'};
# print "Comments: $tot{$key}{'comments'}\n"
# if ($tot{$key}{'comments'} =~ /\w+/);
}
#print "\n";
$namewidth=$opt_skip_count ? 20 :25;
$colwidth= $opt_relative ? 9 : 6;
print_sep("=");
#printf "%-$namewidth.${namewidth}s|", "Operation";
$count = 1;
foreach $key (@key_order)
{
# printf "%${colwidth}d|", $count;
$count++;
}
#print "\n";
#print_sep("-");
#print_string("Results per test:");
#print_sep("-");
foreach $key (sort {$a cmp $b} keys %op1)
{
# printf "%-$namewidth.${namewidth}s|", $key;
$first=undef();
foreach $server (@key_order)
{
print_value($first,$tot{$server}{$key}->[0],$tot{$server}{$key}->[1]);
$first=$tot{$server}{$key}->[0] if (!defined($first));
}
# print "\n";
}
print_sep("-");
print_string("The results per operation:");
print_sep("-");
$luukcounter = 1;
foreach $key (sort {$a cmp $b} keys %op)
{
next if ($key =~ /TOTALS/i);
$tmp=$key;
$tmp.= " (" . $tot1{$key_order[0]}{$key}->[1] . ")" if (!$opt_skip_count);
# printf "%-$namewidth.${namewidth}s|", $tmp;
$first=undef();
foreach $server (@key_order)
{
print_value($first,$tot1{$server}{$key}->[0],$tot1{$server}{$key}->[2]);
$first=$tot1{$server}{$key}->[0] if (!defined($first));
}
# print "\n";
$luukcounter++;
}
#print_sep("-");
$key="TOTALS";
#printf "%-$namewidth.${namewidth}s|", $key;
$first=undef();
foreach $server (@key_order)
{
# print_value($first,$tot1{$server}{$key}->[0],$tot1{$server}{$key}->[2]);
$first=$tot1{$server}{$key}->[0] if (!defined($first));
}
#print "\n";
#print_sep("=");
&make_gif;
exit 0;
#
# some format functions;
#
sub print_sep
{
my ($sep)=@_;
# print $sep x ($namewidth + (($colwidth+1) * $column_count)+1),"\n";
}
sub print_value
{
my ($first,$value,$flags)=@_;
my ($tmp);
if (defined($value))
{
if (!defined($first) || !$opt_relative)
{
$tmp=sprintf("%d",$value);
}
else
{
$first=1 if (!$first); # Assume that it took one second instead of 0
$tmp= sprintf("%.2f",$value/$first);
}
if (defined($flags))
{
$tmp="+".$tmp if ($flags =~ /\+/);
$tmp="?".$tmp if ($flags =~ /\?/);
}
}
else
{
$tmp="";
}
$tmp= " " x ($colwidth-length($tmp)) . $tmp if (length($tmp) < $colwidth);
# print $tmp . "|";
}
sub print_string
{
my ($str)=@_;
my ($width);
$width=$namewidth + ($colwidth+1)*$column_count;
$str=substr($str,1,$width) if (length($str) > $width);
# print($str," " x ($width - length($str)),"|\n");
}
sub usage
{
exit(0);
}
###########################################
###########################################
###########################################
# making here a gif of the results ... (lets try it :-))
# luuk .... 1997
###########################################
## take care that $highest / $giftotal / $gifcount / $luukcounter
## are getting their values above ... so don't forget them while
## copying the code to some other program ....
sub make_gif {
&gd; # some base things ....
&legend; # make the nice legend
&lines; # yep sometimes you have to print some lines
&gif("gif/benchmark2-".$opt_cmp); # and finally we can print all to a gif file ...
}
##### mmm we are finished now ...
# first we have to calculate some limits and some other stuff
sub calculate {
# here is the list which I have to know to make everything .....
# the small border width ... $sm_border =
# the border default $border =
# the step default ... if it must be calculated then no value $step =
# the highest number $highest =
# the max length of the text of the x borders $max_len_lb=
# the max length of a legend entry $max_len_le=
# number of entries in the legend $num_legen =
# the length of the color blocks for the legend $legend_block=
# the width of the gif ...if it must be calculated - no value $width =
# the height of the gif .. if it must be calculated - no value $height =
# the width of the grey field ' ' ' ' $width_grey=
# the height of the grey field ' ' ' ' $height_grey=
# number of dashed lines $lines=
# if bars must overlap how much they must be overlapped $overlap=
# titlebar title of graph in two colors big $titlebar=
# titlebar1 sub title of graph in small font in black $titlebar1=
# xlabel $xlabel=
# ylabel $ylabel=
# the name of the gif ... $name=
# then the following things must be known .....
# xlabel below or on the left side ?
# legend yes/no?
# where must the legend be placed?
# must the xlabel be printed horizontal or vertical?
# must the ylabel be printed horizontal or vertical?
# must the graph be a line or a bar graph?
# is a xlabel several different entries or some sub entries of one?
# so xlabel 1 => test1=10, test2=15, test3=7 etc
# or xlabel 1 => test1a=12, test1b=10, test1c=7 etc
# must the bars overlap (only with the second example I think)
# must the number be printed above or next to the bar?
# when must the number be printed .... only when it extends the graph ...???
# the space between the bars .... are that the same width of the bars ...
# or is it a separate space ... defined ???
# must the date printed below or some where else ....
#calculate all space for text and other things ....
$sm_border = 8; # the grey border around ...
$border = 40; #default ...
$left_border = 2.75 * $border; #default ...
$right_border = $border; #default ...
$up_border = $border; #default ...
$down_border = $border; # default ...
$step = ($height - $up_border - $down_border)/ ($luukcounter + (($#key_order + 1) * $luukcounter));
# can set $step to get nice graphs ... and change the format ...
$step = 8; # hard-coded step value
$gifavg = ($giftotal/$gifcount);
$highest = 2 * $gifavg;
$highest = 1; # hard-coded highest value ...
$xhigh = int($highest + .5 * $highest);
# here to get the max length of the test entries ...
# so we can calculate the width of the left border
foreach $oper (sort keys (%op)) {
$max_len_lb = length($oper) if (length($oper) > $max_len_lb);
# print "oper = $oper - $max_len_lb\n";
}
$max_len_lb = $max_len_lb * gdSmallFont->width;
$left_border = (3*$sm_border) + $max_len_lb;
$down_border = (4*$sm_border) + (gdSmallFont->width*(length($xhigh)+3)) + (gdSmallFont->height *2);
$right_border = (3*$sm_border) + 3 + (gdSmallFont->width*(length($highest)+5));
# calculate the space for the legend .....
foreach $key (@key_order) {
$tmp = $key;
$tmp =~ s/-cmp-$opt_cmp//i;
$giflegend = sprintf "%-24.24s: %-40.40s",$tmp,$tot{$key}{'server'};
$max_len_le = length($giflegend) if (length($giflegend) > $max_len_le);
}
$max_len_le = $max_len_le * gdSmallFont->width;
$legend_block = 10; # the length of the block in the legend
$max_high_le = (($#key_order + 1)*(gdSmallFont->height+2)) + (2*$legend_block);
$down_border += $max_high_le;
$up_border = (5 * $sm_border) + gdSmallFont->height + gdLargeFont->height;
print "Here are some things we already know ....\n";
# print "luukcounter = $luukcounter (number of tests)\n";
# print "gifcount = $gifcount (number of total entries)\n";
# print "giftotal = $giftotal (total secs)\n";
# print "gifavg = $gifavg\n";
# print "highest = $highest\n";
# print "xhigh = $xhigh\n";
# print "step = $step -- $#key_order\n";
# print "max_len_lb = $max_len_lb\n";
# printf "Small- width %d - height %s\n",gdSmallFont->width,gdSmallFont->height;
# printf "Tiny- width %d - height %s\n",gdTinyFont->width,gdTinyFont->height;
}
sub gd {
&calculate;
$width = 600; # the width ....
$height = 500; # the height ...
$width_greyfield = 430;
# when $step is set ... count the height ....????
$width = $width_greyfield + $left_border + $right_border;
$height = ($step * ($luukcounter + ($luukcounter * ($#key_order + 1)))) + $down_border + $up_border;
$b_width = $width - ($left_border + $right_border); # width within the grey field
$overlap = 0; # how far each column may overlap the next ... nice :-)
# make the gif image ....
$im = new GD::Image($width,$height);
# allocate the colors to use ...
$white = $im->colorAllocate(255,255,255);
$black = $im->colorAllocate(0,0,0);
$paper_white = $im->colorAllocate(220, 220, 220);
$grey1 = $im->colorAllocate(240, 240, 240);
$grey4 = $im->colorAllocate(229, 229, 229);
$grey2 = $im->colorAllocate(102, 102, 102);
$grey3 = $im->colorAllocate(153, 153, 153);
$red = $im->colorAllocate(205,0,0); # msql
$lred = $im->colorAllocate(255,0,0);
$blue = $im->colorAllocate(0,0,205); # mysql
$lblue = $im->colorAllocate(0,0,255); # mysql_pgcc
$green = $im->colorAllocate(0, 205, 0); # postgres
$lgreen = $im->colorAllocate(0, 255, 0); # pg_fast
$orange = $im->colorAllocate(205,133, 0); # solid
$lorange = $im->colorAllocate(255, 165, 0); # Adabas
$yellow = $im->colorAllocate(205,205,0); # empress
$lyellow = $im->colorAllocate(255,255,0);
$magenta = $im->colorAllocate(255,0,255); # oracle
$lmagenta = $im->colorAllocate(255,200,255);
$cyan = $im->colorAllocate(0,205,205); # sybase
$lcyan = $im->colorAllocate(0,255,255);
$sienna = $im->colorAllocate(139,71,38); # db2
$lsienna = $im->colorAllocate(160,82,45);
$coral = $im->colorAllocate(205,91,69); # Informix
$lcoral = $im->colorAllocate(255,114,86);
$peach = $im->colorAllocate(205,175,149);
$lpeach = $im->colorAllocate(255,218,185);
@colors = ($red, $blue, $green, $orange, $yellow, $magenta, $cyan, $sienna, $coral, $peach);
@lcolors = ($lred, $lblue, $lgreen, $lorange, $lyellow, $lmagenta, $lcyan, $lsienna, $lcoral, $lpeach);
# set a color per server so in every result it has the same color ....
foreach $key (@key_order) {
if ($tot{$key}{'server'} =~ /mysql/i) {
if ($key =~ /mysql_pgcc/i || $key =~ /mysql_odbc/i) {
$tot{$key}{'color'} = $lblue;
} else {
$tot{$key}{'color'} = $blue;
}
} elsif ($tot{$key}{'server'} =~ /msql/i) {
$tot{$key}{'color'} = $lred;
} elsif ($tot{$key}{'server'} =~ /postgres/i) {
if ($key =~ /pg_fast/i) {
$tot{$key}{'color'} = $lgreen;
} else {
$tot{$key}{'color'} = $green;
}
} elsif ($tot{$key}{'server'} =~ /solid/i) {
$tot{$key}{'color'} = $lorange;
} elsif ($tot{$key}{'server'} =~ /empress/i) {
$tot{$key}{'color'} = $lyellow;
} elsif ($tot{$key}{'server'} =~ /oracle/i) {
$tot{$key}{'color'} = $magenta;
} elsif ($tot{$key}{'server'} =~ /sybase/i) {
$tot{$key}{'color'} = $cyan;
} elsif ($tot{$key}{'server'} =~ /db2/i) {
$tot{$key}{'color'} = $sienna;
} elsif ($tot{$key}{'server'} =~ /informix/i) {
$tot{$key}{'color'} = $coral;
} elsif ($tot{$key}{'server'} =~ /microsoft/i) {
$tot{$key}{'color'} = $peach;
} elsif ($tot{$key}{'server'} =~ /access/i) {
$tot{$key}{'color'} = $lpeach;
} elsif ($tot{$key}{'server'} =~ /adabas/i) {
$tot{$key}{'color'} = $lorange;
}
}
# make the nice little borders
# left bar
$poly0 = new GD::Polygon;
$poly0->addPt(0,0);
$poly0->addPt($sm_border,$sm_border);
$poly0->addPt($sm_border,($height - $sm_border));
$poly0->addPt(0,$height);
$im->filledPolygon($poly0,$grey1);
$im->polygon($poly0, $grey4);
# upper bar
$poly3 = new GD::Polygon;
$poly3->addPt(0,0);
$poly3->addPt($sm_border,$sm_border);
$poly3->addPt(($width - $sm_border),$sm_border);
$poly3->addPt($width,0);
$im->polygon($poly3, $grey4);
$tmptime = localtime(time);
$im->string(gdSmallFont,($width - $sm_border - (gdSmallFont->width * length($tmptime))),($height - ($sm_border) - gdSmallFont->height), $tmptime, $grey3);
# right bar
$poly1 = new GD::Polygon;
$poly1->addPt($width,0);
$poly1->addPt(($width - $sm_border),$sm_border);
$poly1->addPt(($width - $sm_border),($height - $sm_border));
$poly1->addPt($width,$height);
$im->filledPolygon($poly1, $grey3);
$im->stringUp(gdSmallFont,($width - 10),($height - (2 * $sm_border)), "Made by Luuk de Boer - 1997 (c)", $blue);
#below bar
$poly2 = new GD::Polygon;
$poly2->addPt(0,$height);
$poly2->addPt($sm_border,($height - $sm_border));
$poly2->addPt(($width - $sm_border),($height - $sm_border));
$poly2->addPt($width,$height);
$im->filledPolygon($poly2, $grey2);
# draw the black border around the area you will print in ... (must be done last,
# which is hard to develop with ... and the filled grey must be done first :-)
$im->filledRectangle($left_border,$up_border,($width - ($right_border)),($height-$down_border),$grey4);
# print the nice title ...
$titlebar = "MySQL Benchmark results"; # head title ...
$titlebar1 = "Compare $opt_cmp "; # sub title
$header2 = "seconds/test"; # header value
$center = ($width / 2) - ((gdLargeFont->width * length($titlebar)) / 2);
$center1 = ($width / 2) - ((gdSmallFont->width * length($titlebar1)) / 2);
$center2 = ($width_greyfield/2) - ((gdSmallFont->width*length($header2))/2);
$bovenkant = $sm_border * 3;
$bovenkant1 = $bovenkant + gdLargeFont->height + (.5*$sm_border);
$bovenkant2 = $height - $down_border + (1*$sm_border) + (gdSmallFont->width*(length($xhigh)+3));
$im->string(gdLargeFont,($center),($bovenkant + 1), $titlebar, $grey3);
$im->string(gdLargeFont,($center),($bovenkant), $titlebar, $red);
$im->string(gdSmallFont,($center1),($bovenkant1), $titlebar1, $black);
$im->string(gdSmallFont,($left_border + $center2),($bovenkant2), $header2, $black);
$xlength = $width - $left_border - $right_border;
$lines = 10; # hard coded number of dashed lines
$xverh = $xlength / $xhigh;
# print " de verhouding ===> $xverh --- $xlength -- $xhigh \n";
$xstep = ($xhigh / $lines) * $xverh;
$teller = 0;
# make the nice dashed lines and print the values ...
for ($i = 0; $i <= $lines; $i++) {
$st2 = ($left_border) + ($i * $xstep);
$im->dashedLine($st2,($height-$down_border),$st2,($up_border), $grey3);
if (($i != 0) && ($teller == 2)) {
$st3 = sprintf("%.2f", $i*($xhigh/$lines));
$im->stringUp(gdTinyFont,($st2 - (gdSmallFont->height/2)),($height - $down_border +(.5*$sm_border) + (gdSmallFont->width*(length($xhigh)+3))), $st3, $black);
$teller = 0;
}
$teller++;
}
$im->rectangle($left_border,$up_border,($width - ($right_border)),($height-$down_border),$black);
}
sub legend {
# make the legend ...
$legxbegin = $left_border;
$legybegin = $height - $down_border + (2*$sm_border) + (gdSmallFont->width * (length($xhigh) + 3)) + gdSmallFont->height;
$legxend = $legxbegin + $max_len_le + (4*$legend_block);
$legxend = $legxbegin + $width_greyfield;
$legyend = $legybegin + $max_high_le;
$im->filledRectangle($legxbegin,$legybegin,$legxend,$legyend,$grey4);
$im->rectangle($legxbegin,$legybegin,$legxend,$legyend,$black);
# calculate the space for the legend .....
$c = 0; $i = 1;
$legybegin += $legend_block;
foreach $key (@key_order) {
$xtmp = $legxbegin + $legend_block;
$ytmp = $legybegin + ($c * (gdSmallFont->height +2));
$xtmp1 = $xtmp + $legend_block;
$ytmp1 = $ytmp + gdSmallFont->height;
$im->filledRectangle($xtmp,$ytmp,$xtmp1,$ytmp1,$tot{$key}{'color'});
$im->rectangle($xtmp,$ytmp,$xtmp1,$ytmp1,$black);
$tmp = $key;
$tmp =~ s/-cmp-$opt_cmp//i;
$giflegend = sprintf "%-24.24s: %-40.40s",$tmp,$tot{$key}{'server'};
$xtmp2 = $xtmp1 + $legend_block;
$im->string(gdSmallFont,$xtmp2,$ytmp,"$giflegend",$black);
$c++;
$i++;
# print "$c $i -> $giflegend\n";
}
}
sub lines {
$g = 0;
$i = 0;
$ybegin = $up_border + ((($#key_order + 2)/2)*$step);
$xbegin = $left_border;
foreach $key (sort {$a cmp $b} keys %op) {
next if ($key =~ /TOTALS/i);
$c = 0;
# print "key - $key\n";
foreach $server (@key_order) {
$tot1{$server}{$key}->[1] = 1 if ($tot1{$server}{$key}->[1] == 0);
$entry = $tot1{$server}{$key}->[0]/$tot1{$server}{$key}->[1];
$ytmp = $ybegin + ($i * $step) ;
$xtmp = $xbegin + ($entry * $xverh) ;
$ytmp1 = $ytmp + $step;
# print "$server -- $entry --x $xtmp -- y $ytmp - $c\n";
$entry1 = sprintf("%.2f", $entry);
if ($entry < $xhigh) {
$im->filledRectangle($xbegin, $ytmp, $xtmp, $ytmp1, $tot{$server}{'color'});
$im->rectangle($xbegin, $ytmp, $xtmp, $ytmp1, $black);
# print the seconds behind the bar (look below for another entry)
# this entry is for the bars that are not greater than the max width
# of the grey field ...
# $im->string(gdTinyFont,(($xtmp+3),($ytmp),"$entry1",$black));
# if you want the seconds in the color of the bar just uncomment it (below)
# $im->string(gdTinyFont,(($xtmp+3),($ytmp),"$entry1",$tot{$server}{'color'}));
} else {
$im->filledRectangle($xbegin, $ytmp, ($xbegin + ($xhigh*$xverh)), $ytmp1, $tot{$server}{'color'});
$im->rectangle($xbegin, $ytmp, ($xbegin + ($xhigh*$xverh)), $ytmp1, $black);
# print the seconds behind the bar (look below for another entry)
# here the seconds are printed behind the bar if the bar is too big for
# the graph ... (seconds is greater than xhigh ...)
$im->string(gdTinyFont, ($xbegin + ($xhigh*$xverh)+3),($ytmp),"$entry1",$black);
# if you want the seconds in the color of the bar just uncomment it (below)
# $im->string(gdTinyFont, ($xbegin + ($xhigh*$xverh)+3),($ytmp),"$entry1",$colors[$c]);
}
$c++;
$i++;
}
# see if we can center the text between the bars ...
$ytmp2 = $ytmp1 - (((($c)*$step) + gdSmallFont->height)/2);
$im->string(gdSmallFont,($sm_border*2),$ytmp2,$key, $black);
$i++;
}
}
sub gif {
my ($name) = @_;
$name_gif = $name . ".gif";
print "name --> $name_gif\n";
open (GIF, "> $name_gif") || die "Can't open $name_gif: $!\n";
print GIF $im->gif;
close (GIF);
}
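
The per-operation numbers that the script draws come out of the "Totals per operation" section of each RUN file; the comments inside the read loop above describe the columns that the regular expression captures. A minimal sketch of parsing one such line into (operation, seconds, loop count, flag) follows; the sample line itself is invented.

#!/usr/bin/perl
# Sketch of parsing one "Totals per operation" line from a RUN log,
# using the same column layout the comments above describe.
my $line = "insert  265.21  32.01  10.02  0.00  300021 +";

if ($line =~ /^(\S+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s*([+|?])*/)
{
  my ($op, $seconds, $loops, $flag) = ($1, $2, $6, $7);
  $flag = "" if (!defined($flag));
  printf "%-10s %10.2f secs %8d loops  %.6f secs/loop %s\n",
         $op, $seconds, $loops, $seconds / $loops, $flag;
}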

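The drawing itself is done with the GD module: allocate an image, allocate colors (the first colorAllocate call becomes the background), draw one filled rectangle per result and write the image out. A minimal, self-contained sketch of that pattern is below; the result values, sizes and file name are invented, and the gif() output call mirrors the script above and needs a GD build with GIF support (some GD releases only provide png()).

#!/usr/bin/perl
# Minimal sketch of the GD usage pattern above: one horizontal bar per
# server, scaled against the slowest result.  All values are invented.
use GD;

my %seconds = ("mysql" => 42, "pg" => 97, "solid" => 151);   # fake totals
my ($width, $height, $row) = (400, 100, 0);
my $xscale = ($width - 120) / 151;                           # widest bar fits

my $im    = new GD::Image($width, $height);
my $white = $im->colorAllocate(255, 255, 255);               # background
my $black = $im->colorAllocate(0, 0, 0);
my $blue  = $im->colorAllocate(0, 0, 205);

foreach my $server (sort keys %seconds)
{
  my $y = 10 + $row * 25;
  my $x = 100 + $seconds{$server} * $xscale;
  $im->filledRectangle(100, $y, $x, $y + 15, $blue);
  $im->rectangle(100, $y, $x, $y + 15, $black);
  $im->string(gdSmallFont, 5, $y, $server, $black);
  $row++;
}

open(GIF, "> benchmark-sketch.gif") || die "Can't write gif: $!\n";
binmode(GIF);
print GIF $im->gif;
close(GIF);
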
View File

@ -1,4 +1,4 @@
#This file is automaticly generated by crash-me 1.54
#This file is automaticly generated by crash-me 1.57
NEG=yes # update of column= -column
Need_cast_for_null=no # Need to cast NULL for arithmetic
@ -36,7 +36,7 @@ constraint_check=no # Column constraints
constraint_check_table=no # Table constraints
constraint_null=yes # NULL constraint (SyBase style)
crash_me_safe=yes # crash me safe
crash_me_version=1.54 # crash me version
crash_me_version=1.57 # crash me version
create_default=yes # default value for column
create_default_func=no # default value function for column
create_if_not_exists=yes # create table if not exists
@ -394,7 +394,7 @@ select_limit2=yes # SELECT with LIMIT #,#
select_string_size=1048565 # constant string size in SELECT
select_table_update=no # Update with sub select
select_without_from=yes # SELECT without FROM
server_version=MySQL 3.23.29 gamma # server version
server_version=MySQL 3.23.39 debug # server version
simple_joins=yes # ANSI SQL simple joins
storage_of_float=round # Storage of float values
subqueries=no # subqueries
@ -402,7 +402,7 @@ table_alias=yes # Table alias
table_name_case=no # case independent table names
table_wildcard=yes # Select table_name.*
temporary_table=yes # temporary tables
transactions=no # transactions
transactions=yes # constant string size in where
truncate_table=yes # truncate
type_extra_abstime=no # Type abstime
type_extra_bfile=no # Type bfile
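
The limits files above are plain "key=value # comment" lines written by crash-me; the transactions line flipping from no to yes lines up with the per-table-type probing added to crash-me earlier in this commit. Below is a minimal sketch of reading such a file back into a hash; the file name is illustrative.

#!/usr/bin/perl
# Sketch of reading a crash-me limits file ("key=value  # comment" lines,
# as shown above) into a hash; the file name is illustrative.
my %limits;
my $file = "limits/mysql.cfg";

open(CONFIG, "< $file") || die "Can't open $file: $!\n";
while (<CONFIG>)
{
  chomp;
  next if (/^\s*$/ || /^\s*#/);                 # skip blank and comment lines
  if (/^([^=]+)=([^#]*)(?:#\s*(.*))?$/)
  {
    my ($key, $value) = ($1, $2);
    $value =~ s/\s+$//;
    $limits{$key} = $value;
  }
}
close(CONFIG);

print "transactions: ",
      (defined($limits{'transactions'}) ? $limits{'transactions'} : "unknown"), "\n";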

View File

@ -1,4 +1,4 @@
#This file is automaticly generated by crash-me 1.54
#This file is automaticly generated by crash-me 1.57
NEG=yes # update of column= -column
Need_cast_for_null=no # Need to cast NULL for arithmetic
@ -36,7 +36,7 @@ constraint_check=no # Column constraints
constraint_check_table=no # Table constraints
constraint_null=yes # NULL constraint (SyBase style)
crash_me_safe=yes # crash me safe
crash_me_version=1.54 # crash me version
crash_me_version=1.57 # crash me version
create_default=yes # default value for column
create_default_func=no # default value function for column
create_if_not_exists=yes # create table if not exists
@ -394,7 +394,7 @@ select_limit2=yes # SELECT with LIMIT #,#
select_string_size=1048565 # constant string size in SELECT
select_table_update=no # Update with sub select
select_without_from=yes # SELECT without FROM
server_version=MySQL 3.23.29 gamma # server version
server_version=MySQL 3.23.39 debug # server version
simple_joins=yes # ANSI SQL simple joins
storage_of_float=round # Storage of float values
subqueries=no # subqueries
@ -402,7 +402,7 @@ table_alias=yes # Table alias
table_name_case=no # case independent table names
table_wildcard=yes # Select table_name.*
temporary_table=yes # temporary tables
transactions=no # transactions
transactions=yes # constant string size in where
truncate_table=yes # truncate
type_extra_abstime=no # Type abstime
type_extra_bfile=no # Type bfile

View File

@ -1,4 +1,4 @@
#This file is automaticly generated by crash-me 1.56
#This file is automaticly generated by crash-me 1.57
NEG=yes # update of column= -column
Need_cast_for_null=no # Need to cast NULL for arithmetic
@ -36,7 +36,7 @@ constraint_check=yes # Column constraints
constraint_check_table=yes # Table constraints
constraint_null=yes # NULL constraint (SyBase style)
crash_me_safe=yes # crash me safe
crash_me_version=1.56 # crash me version
crash_me_version=1.57 # crash me version
create_default=yes # default value for column
create_default_func=yes # default value function for column
create_if_not_exists=no # create table if not exists

View File

@ -800,18 +800,29 @@ sub reconnect_on_errors
sub vacuum
{
my ($self,$full_vacuum,$dbh_ref)=@_;
my ($loop_time,$end_time,$dbh);
my ($self,$full_vacuum,$dbh_ref,@tables)=@_;
my ($loop_time,$end_time,$dbh,$table);
if (defined($full_vacuum))
{
$$dbh_ref->disconnect; $$dbh_ref= $self->connect();
}
$dbh=$$dbh_ref;
$loop_time=new Benchmark;
$dbh->do("vacuum") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
$dbh->do("vacuum pg_attributes") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
$dbh->do("vacuum pg_index") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
$dbh->do("vacuum analyze") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
if ($#tables >= 0)
{
foreach $table (@tables)
{
$dbh->do("vacuum analyze $table") || die "Got error: $DBI::errstr when executing 'vacuum analyze $table'\n";
$dbh->do("vacuum $table") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
}
}
else
{
# $dbh->do("vacuum pg_attributes") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
# $dbh->do("vacuum pg_index") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
$dbh->do("vacuum analyze") || die "Got error: $DBI::errstr when executing 'vacuum analyze'\n";
$dbh->do("vacuum") || die "Got error: $DBI::errstr when executing 'vacuum'\n";
}
$end_time=new Benchmark;
print "Time for book-keeping (1): " .
Benchmark::timestr(Benchmark::timediff($end_time, $loop_time),"all") . "\n\n";
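
The change above lets the PostgreSQL wrapper vacuum just the benchmark tables it is handed instead of always vacuuming the whole database. A minimal sketch of the same idea with plain DBI follows; the DSN, user and table names are invented, and it assumes a reachable PostgreSQL test database.

#!/usr/bin/perl
# Sketch of per-table versus whole-database VACUUM with DBI, mirroring
# the new optional table list above.  Connection details are invented.
use DBI;

my $dbh = DBI->connect("DBI:Pg:dbname=test", "postgres", "",
                       { RaiseError => 1, AutoCommit => 1 });

sub vacuum
{
  my ($dbh, @tables) = @_;
  if (@tables)
  {
    foreach my $table (@tables)
    {
      $dbh->do("vacuum analyze $table");        # only the given tables
      $dbh->do("vacuum $table");
    }
  }
  else
  {
    $dbh->do("vacuum analyze");                 # whole database
    $dbh->do("vacuum");
  }
}

vacuum($dbh, "bench1", "bench2");
$dbh->disconnect;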

View File

@ -250,10 +250,6 @@ if ($limits->{'unique_index'})
timestr(timediff($end_time, $loop_time),"all") . "\n\n";
}
#if ($opt_fast && defined($server->{vacuum}))
#{
# $server->vacuum(1,\$dbh);
#}
####
#### Do some selects on the table
@ -1410,10 +1406,6 @@ if ($limits->{'insert_multi_value'})
print "Time for multiple_value_insert (" . ($opt_loop_count) . "): " .
timestr(timediff($end_time, $loop_time),"all") . "\n\n";
if ($opt_fast && defined($server->{vacuum}))
{
$server->vacuum(1,\$dbh);
}
if ($opt_lock_tables)
{
$sth = $dbh->do("UNLOCK TABLES ") || die $DBI::errstr;
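
The UNLOCK TABLES call that closes the hunk pairs with the LOCK TABLES issued when the benchmark runs with --lock-tables. A minimal DBI sketch of that lock / bulk insert / unlock pattern follows; the DSN, credentials and table name are invented and a reachable MySQL server is assumed.

#!/usr/bin/perl
# Sketch of the LOCK TABLES ... UNLOCK TABLES pattern the benchmark uses
# with --lock-tables; connection details and table name are invented.
use DBI;

my $dbh = DBI->connect("DBI:mysql:database=test", "root", "",
                       { RaiseError => 1 });

$dbh->do("create table if not exists bench1 (id int not null, val int)");
$dbh->do("LOCK TABLES bench1 WRITE");
for (my $i = 0 ; $i < 1000 ; $i++)
{
  $dbh->do("insert into bench1 (id, val) values ($i, $i)");
}
$dbh->do("UNLOCK TABLES");
$dbh->disconnect;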

View File

@ -679,7 +679,7 @@ ha_innobase::bas_ext() const
/* out: file extension strings, currently not
used */
{
static const char* ext[] = {".not_used", NullS};
static const char* ext[] = {".InnoDB", NullS};
return(ext);
}
@ -779,6 +779,13 @@ ha_innobase::open(
if (NULL == (ib_table = dict_table_get(norm_name, NULL))) {
fprintf(stderr, "\
Cannot find table %s from the internal data dictionary\n\
of InnoDB though the .frm file for the table exists. Maybe you have deleted\n\
and created again an InnoDB database but forgotten to delete the\n\
corresponding .frm files of old InnoDB tables?\n",
norm_name);
free_share(share);
my_free((char*) upd_buff, MYF(0));
my_errno = ENOENT;

View File

@ -1758,7 +1758,7 @@ Item_func_get_user_var::val_str(String *str)
return NULL;
switch (entry->type) {
case REAL_RESULT:
str->set(*(double*) entry->value);
str->set(*(double*) entry->value,decimals);
break;
case INT_RESULT:
str->set(*(longlong*) entry->value);

View File

@ -2839,7 +2839,7 @@ struct show_var_st init_vars[]= {
{"innodb_log_arch_dir", (char*) &innobase_log_arch_dir, SHOW_CHAR_PTR},
{"innodb_log_archive", (char*) &innobase_log_archive, SHOW_MY_BOOL},
{"innodb_log_group_home_dir", (char*) &innobase_log_group_home_dir, SHOW_CHAR_PTR},
{"innodb_unix_file_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR},
{"innodb_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR},
#endif
{"interactive_timeout", (char*) &net_interactive_timeout, SHOW_LONG},
{"join_buffer_size", (char*) &join_buff_size, SHOW_LONG},
@ -3113,6 +3113,7 @@ static void usage(void)
puts("\
--innodb_data_home_dir=dir The common part for Innodb table spaces\n\
--innodb_data_file_path=dir Path to individual files and their sizes\n\
--innodb_flush_method=# Which method to flush data\n\
--innodb_flush_log_at_trx_commit[=#]\n\
Set to 0 if you don't want to flush logs\n\
--innodb_log_arch_dir=dir Where full logs should be archived\n\
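
After the rename above, the server variable shows up as innodb_flush_method instead of innodb_unix_file_flush_method. A small DBI sketch that reads the value back is below; it assumes a server built with InnoDB support, and the connection details are invented.

#!/usr/bin/perl
# Sketch: read the renamed innodb_flush_method variable back via
# SHOW VARIABLES; connection details are invented.
use DBI;

my $dbh = DBI->connect("DBI:mysql:database=test", "root", "",
                       { RaiseError => 1 });

my $sth = $dbh->prepare("SHOW VARIABLES LIKE 'innodb_flush_method'");
$sth->execute;
while (my ($name, $value) = $sth->fetchrow_array)
{
  print "$name = $value\n";
}
$sth->finish;
$dbh->disconnect;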

View File

@ -384,6 +384,9 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh,
thd->in_lock_tables=1;
result=reopen_tables(thd,1,1);
thd->in_lock_tables=0;
/* Set version for table */
for (TABLE *table=thd->open_tables; table ; table=table->next)
table->version=refresh_version;
}
VOID(pthread_mutex_unlock(&LOCK_open));
if (if_wait_for_refresh)

View File

@ -1362,6 +1362,7 @@ select_create::prepare(List<Item> &values)
if (info.handle_duplicates == DUP_IGNORE ||
info.handle_duplicates == DUP_REPLACE)
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
table->file->deactivate_non_unique_index((ha_rows) 0);
DBUG_RETURN(0);
}

View File

@ -221,6 +221,13 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
db_options|=HA_OPTION_PACK_RECORD;
file=get_new_handler((TABLE*) 0, create_info->db_type);
if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
(file->option_flag() & HA_NO_TEMP_TABLES))
{
my_error(ER_ILLEGAL_HA,MYF(0),table_name);
DBUG_RETURN(-1);
}
/* Don't pack keys in old tables if the user has requested this */
while ((sql_field=it++))
@ -1240,7 +1247,16 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name,
{
if (drop->type == Alter_drop::COLUMN &&
!my_strcasecmp(field->field_name, drop->name))
{
/* Reset auto_increment value if it was dropped */
if (MTYP_TYPENR(field->unireg_check) == Field::NEXT_NUMBER &&
!(create_info->used_fields & HA_CREATE_USED_AUTO))
{
create_info->auto_increment_value=0;
create_info->used_fields|=HA_CREATE_USED_AUTO;
}
break;
}
}
if (drop)
{

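The sql_table.cc change above resets the remembered auto_increment counter when an ALTER TABLE drops the AUTO_INCREMENT column and no explicit AUTO_INCREMENT= value was given. A small DBI sketch that exercises that path follows; the table name and connection details are invented.

#!/usr/bin/perl
# Sketch: drop an AUTO_INCREMENT column with ALTER TABLE, the case the
# change above handles; table name and connection details are invented.
use DBI;

my $dbh = DBI->connect("DBI:mysql:database=test", "root", "",
                       { RaiseError => 1 });

$dbh->do("drop table if exists crash_q");
$dbh->do("create table crash_q (a int not null auto_increment primary key, b int)");
$dbh->do("insert into crash_q (b) values (1),(2),(3)");

# Dropping column 'a' removes the AUTO_INCREMENT field; with the change
# above the stored auto_increment value is reset rather than kept.
$dbh->do("alter table crash_q drop a");
$dbh->disconnect;
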
View File

@ -360,6 +360,7 @@ fi
%attr(755, root, root) /usr/bin/mysql
%attr(755, root, root) /usr/bin/mysqlaccess
%attr(755, root, root) /usr/bin/mysqladmin
%attr(755, root, root) /usr/bin/mysqlcheck
%attr(755, root, root) /usr/bin/mysql_find_rows
%attr(755, root, root) /usr/bin/mysqldump
%attr(755, root, root) /usr/bin/mysqlimport