
Merge zim.(none):/home/brian/mysql/dep-5.1

into  zim.(none):/home/brian/mysql/remove-bdb-5.1
brian@zim.(none)
2006-08-14 21:26:33 -07:00
88 changed files with 5314 additions and 16278 deletions

View File

@ -38,7 +38,6 @@ EXTRA_DIST = FINISH.sh \
compile-pentium-debug-max \
compile-pentium-debug-max-no-embedded \
compile-pentium-debug-max-no-ndb \
-compile-pentium-debug-no-bdb \
compile-pentium-debug-openssl \
compile-pentium-debug-yassl \
compile-pentium-gcov \

View File

@ -52,7 +52,6 @@ fi
--with-csv-storage-engine \
--with-example-storage-engine \
--with-federated-storage-engine \
---with-berkeley-db \
--with-innodb \
--with-ssl \
--enable-thread-safe-client \

View File

@ -1,9 +0,0 @@
#! /bin/sh
path=`dirname $0`
. "$path/SETUP.sh"
extra_flags="$pentium_cflags $debug_cflags"
extra_configs="$pentium_configs $debug_configs --without-berkeley-db $static_link"
. "$path/FINISH.sh"

View File

@ -37,7 +37,7 @@ gmake -k clean || true
path=`dirname $0`
. "$path/autorun.sh"
-CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wimplicit-int -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -DHAVE_purify -DEXTRA_DEBUG -O2" CXX=gcc CXXLD=g++ CXXFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -DHAVE_purify -DEXTRA_DEBUG -O2" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-berkeley-db --with-embedded-server --with-innodb $EXTRA_CONFIG_FLAGS
+CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wimplicit-int -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -DHAVE_purify -DEXTRA_DEBUG -O2" CXX=gcc CXXLD=g++ CXXFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -DHAVE_purify -DEXTRA_DEBUG -O2" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-embedded-server --with-innodb $EXTRA_CONFIG_FLAGS
gmake -j 4

View File

@ -131,9 +131,6 @@ ADD_SUBDIRECTORY(client)
IF(WITH_ARCHIVE_STORAGE_ENGINE)
ADD_SUBDIRECTORY(storage/archive)
ENDIF(WITH_ARCHIVE_STORAGE_ENGINE)
-IF(WITH_BERKELEY_STORAGE_ENGINE)
-ADD_SUBDIRECTORY(storage/bdb)
-ENDIF(WITH_BERKELEY_STORAGE_ENGINE)
IF(WITH_BLACKHOLE_STORAGE_ENGINE)
ADD_SUBDIRECTORY(storage/blackhole)
ENDIF(WITH_BLACKHOLE_STORAGE_ENGINE)

View File

@ -32,7 +32,6 @@ sinclude(config/ac-macros/check_cpu.m4)
sinclude(config/ac-macros/character_sets.m4)
sinclude(config/ac-macros/compiler_flag.m4)
sinclude(config/ac-macros/plugins.m4)
-sinclude(config/ac-macros/ha_berkeley.m4)
sinclude(config/ac-macros/ha_ndbcluster.m4)
sinclude(config/ac-macros/large_file.m4)
sinclude(config/ac-macros/misc.m4)
@ -2142,12 +2141,6 @@ MYSQL_CHECK_SSL
# functions tested above
#--------------------------------------------------------------------
-MYSQL_STORAGE_ENGINE(berkeley, berkeley-db, [BerkeleyDB Storage Engine],
-[Transactional Tables using BerkeleyDB], [max,max-no-ndb])
-MYSQL_PLUGIN_DIRECTORY(berkeley,[storage/bdb])
-MYSQL_PLUGIN_STATIC(berkeley, [[\$(bdb_libs_with_path)]])
-MYSQL_PLUGIN_ACTIONS(berkeley, [MYSQL_SETUP_BERKELEY_DB])
MYSQL_STORAGE_ENGINE(blackhole,,[Blackhole Storage Engine],
[Basic Write-only Read-never tables], [max,max-no-ndb])
MYSQL_PLUGIN_DIRECTORY(blackhole, [storage/blackhole])

View File

@ -16,7 +16,6 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/extra/yassl/include
-${CMAKE_SOURCE_DIR}/storage/bdb/build_win32
${CMAKE_SOURCE_DIR}/zlib
)
@ -84,9 +83,6 @@ ENDIF(WITH_EXAMPLE_STORAGE_ENGINE)
IF(WITH_INNOBASE_STORAGE_ENGINE)
ADD_DEPENDENCIES(mysqlserver innobase)
ENDIF(WITH_INNOBASE_STORAGE_ENGINE)
-IF(WITH_BERKELEY_STORAGE_ENGINE)
-ADD_DEPENDENCIES(mysqlserver bdb)
-ENDIF(WITH_BERKELEY_STORAGE_ENGINE)
ADD_LIBRARY(libmysqld MODULE cmake_dummy.c libmysqld.def)
TARGET_LINK_LIBRARIES(libmysqld wsock32)

View File

@ -45,7 +45,7 @@ noinst_HEADERS = embedded_priv.h emb_qcache.h
sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
ha_heap.cc ha_myisam.cc ha_myisammrg.cc \
-ha_innodb.cc ha_berkeley.cc ha_federated.cc ha_ndbcluster.cc \
+ha_innodb.cc ha_federated.cc ha_ndbcluster.cc \
ha_ndbcluster_binlog.cc ha_partition.cc \
handler.cc sql_handler.cc \
hostname.cc init.cc password.c \
@ -96,10 +96,6 @@ yassl_inc_libs= $(top_srcdir)/extra/yassl/src/.libs/libyassl.a \
endif
# Storage engine specific compilation options
-ha_berkeley.o: ha_berkeley.cc
-$(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $<
ha_ndbcluster.o:ha_ndbcluster.cc
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<

View File

@ -3,7 +3,6 @@
#
-- source include/not_embedded.inc
--- source include/have_bdb.inc
-- source include/have_innodb.inc
-- source include/have_debug.inc
@ -12,7 +11,7 @@ drop table if exists t1, t2;
--enable_warnings
reset master;
-create table t1 (a int) engine=bdb;
+create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
begin;
insert t1 values (5);

View File

@ -1,4 +0,0 @@
-- require r/have_bdb.require
disable_query_log;
show variables like "have_bdb";
enable_query_log;

View File

@ -80,7 +80,7 @@ basedir=.
EXTRA_ARG="--language=../sql/share/english/ --character-sets-dir=../sql/share/charsets/"
fi
-mysqld_boot=" $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb --skip-ndbcluster --skip-bdb --tmpdir=. $EXTRA_ARG"
+mysqld_boot=" $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb --skip-ndbcluster --tmpdir=. $EXTRA_ARG"
echo "running $mysqld_boot"
if $scriptdir/mysql_create_system_tables test $mdata $hostname | $mysqld_boot

View File

@ -2122,7 +2122,6 @@ sub install_db ($$) {
mtr_add_arg($args, "--datadir=%s", $data_dir);
mtr_add_arg($args, "--skip-innodb");
mtr_add_arg($args, "--skip-ndbcluster");
-mtr_add_arg($args, "--skip-bdb");
mtr_add_arg($args, "--tmpdir=.");
if ( ! $opt_netware )
@ -2215,7 +2214,6 @@ basedir = $path_my_basedir
server_id = $server_id
skip-stack-trace
skip-innodb
-skip-bdb
skip-ndbcluster
EOF
;
@ -2629,7 +2627,6 @@ sub mysqld_arguments ($$$$$) {
if ( $opt_valgrind_mysqld )
{
mtr_add_arg($args, "%s--skip-safemalloc", $prefix);
-mtr_add_arg($args, "%s--skip-bdb", $prefix);
}
my $pidfile;

View File

@ -536,8 +536,8 @@ while test $# -gt 0; do
--valgrind | --valgrind-all)
find_valgrind;
VALGRIND=$FIND_VALGRIND
-EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-safemalloc --skip-bdb"
-EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-safemalloc --skip-bdb"
+EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-safemalloc"
+EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-safemalloc"
SLEEP_TIME_AFTER_RESTART=10
SLEEP_TIME_FOR_DELETE=60
USE_RUNNING_SERVER=0

View File

@ -6,26 +6,26 @@ Table Op Msg_type Msg_text
test.t4 backup error Failed copying .frm file (errno: X)
test.t4 backup status Operation failed
Warnings:
-Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
Error 1 Can't create/write to file 'MYSQLTEST_VARDIR/bogus/t4.frm' (Errcode: X)
backup table t4 to '../tmp';
Table Op Msg_type Msg_text
test.t4 backup status OK
Warnings:
-Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
backup table t4 to '../tmp';
Table Op Msg_type Msg_text
test.t4 backup error Failed copying .frm file (errno: X)
test.t4 backup status Operation failed
Warnings:
-Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
Error 1 Can't create/write to file 'MYSQLTEST_VARDIR/tmp/t4.frm' (Errcode: X)
drop table t4;
restore table t4 from '../tmp';
Table Op Msg_type Msg_text
test.t4 restore status OK
Warnings:
-Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
select count(*) from t4;
count(*)
0
@ -35,19 +35,19 @@ backup table t1 to '../tmp';
Table Op Msg_type Msg_text
test.t1 backup status OK
Warnings:
-Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
drop table t1;
restore table t1 from '../bogus';
Table Op Msg_type Msg_text
t1 restore error Failed copying .frm file
Warnings:
-Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
Error 29 File 'MYSQLTEST_VARDIR/bogus/t1.frm' not found (Errcode: X)
restore table t1 from '../tmp';
Table Op Msg_type Msg_text
test.t1 restore status OK
Warnings:
-Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
select n from t1;
n
23
@ -62,7 +62,7 @@ Table Op Msg_type Msg_text
test.t2 backup status OK
test.t3 backup status OK
Warnings:
-Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
drop table t1,t2,t3;
restore table t1,t2,t3 from '../tmp';
Table Op Msg_type Msg_text
@ -70,7 +70,7 @@ test.t1 restore status OK
test.t2 restore status OK
test.t3 restore status OK
Warnings:
-Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
select n from t1;
n
23
@ -91,7 +91,7 @@ restore table t1 from '../tmp';
Table Op Msg_type Msg_text
test.t1 restore status OK
Warnings:
-Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
rename table t1 to t5;
lock tables t5 write;
backup table t5 to '../tmp';
@ -99,7 +99,7 @@ unlock tables;
Table Op Msg_type Msg_text
test.t5 backup status OK
Warnings:
-Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
+Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
drop table t5;
DROP TABLE IF EXISTS `t+1`;
CREATE TABLE `t+1` (c1 INT);

View File

@ -1,11 +0,0 @@
drop table if exists t1;
create table t1(objid BIGINT not null, tablename varchar(64), oid BIGINT not null, test BIGINT, PRIMARY KEY (objid), UNIQUE(tablename)) engine=BDB;
insert into t1 values(1, 't1',4,9);
insert into t1 values(2, 'metatable',1,9);
insert into t1 values(3, 'metaindex',1,9 );
select * from t1;
objid tablename oid test
1 t1 4 9
2 metatable 1 9
3 metaindex 1 9
alter table t1 drop column test;

View File

@ -1,6 +0,0 @@
select * from t1;
objid tablename oid
1 t1 4
2 metatable 1
3 metaindex 1
drop table t1;

View File

@ -1,39 +0,0 @@
drop table if exists t1;
CREATE TABLE t1 (
ChargeID int(10) unsigned NOT NULL auto_increment,
ServiceID int(10) unsigned DEFAULT '0' NOT NULL,
ChargeDate date DEFAULT '0000-00-00' NOT NULL,
ChargeAmount decimal(20,2) DEFAULT '0.00' NOT NULL,
FedTaxes decimal(20,2) DEFAULT '0.00' NOT NULL,
ProvTaxes decimal(20,2) DEFAULT '0.00' NOT NULL,
ChargeStatus enum('New','Auth','Unauth','Sale','Denied','Refund')
DEFAULT 'New' NOT NULL,
ChargeAuthorizationMessage text,
ChargeComment text,
ChargeTimeStamp varchar(20),
PRIMARY KEY (ChargeID),
KEY ServiceID (ServiceID),
KEY ChargeDate (ChargeDate)
) engine=BDB;
BEGIN;
INSERT INTO t1
VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now');
COMMIT;
BEGIN;
UPDATE t1 SET ChargeAuthorizationMessage = 'blablabla' WHERE
ChargeID = 1;
COMMIT;
INSERT INTO t1
VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now');
select * from t1;
ChargeID ServiceID ChargeDate ChargeAmount FedTaxes ProvTaxes ChargeStatus ChargeAuthorizationMessage ChargeComment ChargeTimeStamp
1 1 2001-03-01 1.00 1.00 1.00 New blablabla NULL now
2 1 2001-03-01 1.00 1.00 1.00 New NULL NULL now
drop table t1;
create table t1 (a int) engine=bdb;
set autocommit=0;
insert into t1 values(1);
analyze table t1;
Table Op Msg_type Msg_text
test.t1 analyze status OK
drop table t1;

View File

@ -1,31 +0,0 @@
drop table if exists t1,t2;
create table t1 (id integer, x integer) engine=BDB;
create table t2 (id integer, x integer) engine=BDB;
insert into t1 values(0, 0);
insert into t2 values(0, 0);
set autocommit=0;
update t1 set x = 1 where id = 0;
set autocommit=0;
update t2 set x = 1 where id = 0;
select x from t1 where id = 0;
select x from t2 where id = 0;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
commit;
x
1
commit;
select * from t1;
id x
0 1
select * from t2;
id x
0 1
commit;
select * from t1;
id x
0 1
select * from t2;
id x
0 1
commit;
drop table t1,t2;

File diff suppressed because it is too large

View File

@ -1,99 +0,0 @@
drop table if exists t1, t2, t3;
flush status;
set autocommit=0;
create table t1 (a int not null) engine=bdb;
insert into t1 values (1),(2),(3);
select * from t1;
a
1
2
3
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
drop table t1;
set autocommit=1;
create table t1 (a int not null) engine=bdb;
begin;
insert into t1 values (1),(2),(3);
select * from t1;
a
1
2
3
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 0
drop table t1;
create table t1 (a int not null) engine=bdb;
create table t2 (a int not null) engine=bdb;
create table t3 (a int not null) engine=bdb;
insert into t1 values (1),(2);
insert into t2 values (1),(2);
insert into t3 values (1),(2);
select * from t1;
a
1
2
select * from t2;
a
1
2
select * from t3;
a
1
2
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 3
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
begin;
select * from t1;
a
1
2
select * from t2;
a
1
2
select * from t3;
a
1
2
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 3
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
insert into t1 values (3);
insert into t2 values (3);
insert into t1 values (4);
select * from t1;
a
1
2
3
4
select * from t2;
a
1
2
3
select * from t3;
a
1
2
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 3
show status like "Qcache_hits";
Variable_name Value
Qcache_hits 0
commit;
show status like "Qcache_queries_in_cache";
Variable_name Value
Qcache_queries_in_cache 1
drop table if exists t1, t2, t3;

View File

@ -1,462 +0,0 @@
SET storage_engine=bdb;
DROP TABLE IF EXISTS t1, gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
CREATE TABLE gis_point (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POINT);
CREATE TABLE gis_line (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g LINESTRING);
CREATE TABLE gis_polygon (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POLYGON);
CREATE TABLE gis_multi_point (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g MULTIPOINT);
CREATE TABLE gis_multi_line (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g MULTILINESTRING);
CREATE TABLE gis_multi_polygon (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g MULTIPOLYGON);
CREATE TABLE gis_geometrycollection (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g GEOMETRYCOLLECTION);
CREATE TABLE gis_geometry (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g GEOMETRY);
SHOW CREATE TABLE gis_point;
Table Create Table
gis_point CREATE TABLE `gis_point` (
`fid` int(11) NOT NULL AUTO_INCREMENT,
`g` point DEFAULT NULL,
PRIMARY KEY (`fid`)
) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1
SHOW FIELDS FROM gis_point;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g point YES NULL
SHOW FIELDS FROM gis_line;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g linestring YES NULL
SHOW FIELDS FROM gis_polygon;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g polygon YES NULL
SHOW FIELDS FROM gis_multi_point;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g multipoint YES NULL
SHOW FIELDS FROM gis_multi_line;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g multilinestring YES NULL
SHOW FIELDS FROM gis_multi_polygon;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g multipolygon YES NULL
SHOW FIELDS FROM gis_geometrycollection;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g geometrycollection YES NULL
SHOW FIELDS FROM gis_geometry;
Field Type Null Key Default Extra
fid int(11) NO PRI NULL auto_increment
g geometry YES NULL
INSERT INTO gis_point VALUES
(101, PointFromText('POINT(10 10)')),
(102, PointFromText('POINT(20 10)')),
(103, PointFromText('POINT(20 20)')),
(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
INSERT INTO gis_line VALUES
(105, LineFromText('LINESTRING(0 0,0 10,10 0)')),
(106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')),
(107, LineStringFromWKB(LineString(Point(10, 10), Point(40, 10))));
INSERT INTO gis_polygon VALUES
(108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')),
(109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')),
(110, PolyFromWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0)))));
INSERT INTO gis_multi_point VALUES
(111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')),
(112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')),
(113, MPointFromWKB(MultiPoint(Point(3, 6), Point(4, 10))));
INSERT INTO gis_multi_line VALUES
(114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')),
(115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')),
(116, MLineFromWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7)))));
INSERT INTO gis_multi_polygon VALUES
(117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
(118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
(119, MPolyFromWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3))))));
INSERT INTO gis_geometrycollection VALUES
(120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')),
(121, GeometryFromWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))));
INSERT into gis_geometry SELECT * FROM gis_point;
INSERT into gis_geometry SELECT * FROM gis_line;
INSERT into gis_geometry SELECT * FROM gis_polygon;
INSERT into gis_geometry SELECT * FROM gis_multi_point;
INSERT into gis_geometry SELECT * FROM gis_multi_line;
INSERT into gis_geometry SELECT * FROM gis_multi_polygon;
INSERT into gis_geometry SELECT * FROM gis_geometrycollection;
SELECT fid, AsText(g) FROM gis_point ORDER by fid;
fid AsText(g)
101 POINT(10 10)
102 POINT(20 10)
103 POINT(20 20)
104 POINT(10 20)
SELECT fid, AsText(g) FROM gis_line ORDER by fid;
fid AsText(g)
105 LINESTRING(0 0,0 10,10 0)
106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
107 LINESTRING(10 10,40 10)
SELECT fid, AsText(g) FROM gis_polygon ORDER by fid;
fid AsText(g)
108 POLYGON((10 10,20 10,20 20,10 20,10 10))
109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
110 POLYGON((0 0,30 0,30 30,0 0))
SELECT fid, AsText(g) FROM gis_multi_point ORDER by fid;
fid AsText(g)
111 MULTIPOINT(0 0,10 10,10 20,20 20)
112 MULTIPOINT(1 1,11 11,11 21,21 21)
113 MULTIPOINT(3 6,4 10)
SELECT fid, AsText(g) FROM gis_multi_line ORDER by fid;
fid AsText(g)
114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
115 MULTILINESTRING((10 48,10 21,10 0))
116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
SELECT fid, AsText(g) FROM gis_multi_polygon ORDER by fid;
fid AsText(g)
117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
SELECT fid, AsText(g) FROM gis_geometrycollection ORDER by fid;
fid AsText(g)
120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
SELECT fid, AsText(g) FROM gis_geometry ORDER by fid;
fid AsText(g)
101 POINT(10 10)
102 POINT(20 10)
103 POINT(20 20)
104 POINT(10 20)
105 LINESTRING(0 0,0 10,10 0)
106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
107 LINESTRING(10 10,40 10)
108 POLYGON((10 10,20 10,20 20,10 20,10 10))
109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
110 POLYGON((0 0,30 0,30 30,0 0))
111 MULTIPOINT(0 0,10 10,10 20,20 20)
112 MULTIPOINT(1 1,11 11,11 21,21 21)
113 MULTIPOINT(3 6,4 10)
114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
115 MULTILINESTRING((10 48,10 21,10 0))
116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
SELECT fid, Dimension(g) FROM gis_geometry ORDER by fid;
fid Dimension(g)
101 0
102 0
103 0
104 0
105 1
106 1
107 1
108 2
109 2
110 2
111 0
112 0
113 0
114 1
115 1
116 1
117 2
118 2
119 2
120 1
121 1
SELECT fid, GeometryType(g) FROM gis_geometry ORDER by fid;
fid GeometryType(g)
101 POINT
102 POINT
103 POINT
104 POINT
105 LINESTRING
106 LINESTRING
107 LINESTRING
108 POLYGON
109 POLYGON
110 POLYGON
111 MULTIPOINT
112 MULTIPOINT
113 MULTIPOINT
114 MULTILINESTRING
115 MULTILINESTRING
116 MULTILINESTRING
117 MULTIPOLYGON
118 MULTIPOLYGON
119 MULTIPOLYGON
120 GEOMETRYCOLLECTION
121 GEOMETRYCOLLECTION
SELECT fid, IsEmpty(g) FROM gis_geometry ORDER by fid;
fid IsEmpty(g)
101 0
102 0
103 0
104 0
105 0
106 0
107 0
108 0
109 0
110 0
111 0
112 0
113 0
114 0
115 0
116 0
117 0
118 0
119 0
120 0
121 0
SELECT fid, AsText(Envelope(g)) FROM gis_geometry ORDER by fid;
fid AsText(Envelope(g))
101 POLYGON((10 10,10 10,10 10,10 10,10 10))
102 POLYGON((20 10,20 10,20 10,20 10,20 10))
103 POLYGON((20 20,20 20,20 20,20 20,20 20))
104 POLYGON((10 20,10 20,10 20,10 20,10 20))
105 POLYGON((0 0,10 0,10 10,0 10,0 0))
106 POLYGON((10 10,20 10,20 20,10 20,10 10))
107 POLYGON((10 10,40 10,40 10,10 10,10 10))
108 POLYGON((10 10,20 10,20 20,10 20,10 10))
109 POLYGON((0 0,50 0,50 50,0 50,0 0))
110 POLYGON((0 0,30 0,30 30,0 30,0 0))
111 POLYGON((0 0,20 0,20 20,0 20,0 0))
112 POLYGON((1 1,21 1,21 21,1 21,1 1))
113 POLYGON((3 6,4 6,4 10,3 10,3 6))
114 POLYGON((10 0,16 0,16 48,10 48,10 0))
115 POLYGON((10 0,10 0,10 48,10 48,10 0))
116 POLYGON((1 2,21 2,21 8,1 8,1 2))
117 POLYGON((28 0,84 0,84 42,28 42,28 0))
118 POLYGON((28 0,84 0,84 42,28 42,28 0))
119 POLYGON((0 0,3 0,3 3,0 3,0 0))
120 POLYGON((0 0,10 0,10 10,0 10,0 0))
121 POLYGON((3 6,44 6,44 9,3 9,3 6))
explain extended select Dimension(g), GeometryType(g), IsEmpty(g), AsText(Envelope(g)) from gis_geometry;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE gis_geometry ALL NULL NULL NULL NULL 21 100.00
Warnings:
Note 1003 select dimension(`test`.`gis_geometry`.`g`) AS `Dimension(g)`,geometrytype(`test`.`gis_geometry`.`g`) AS `GeometryType(g)`,isempty(`test`.`gis_geometry`.`g`) AS `IsEmpty(g)`,astext(envelope(`test`.`gis_geometry`.`g`)) AS `AsText(Envelope(g))` from `test`.`gis_geometry`
SELECT fid, X(g) FROM gis_point ORDER by fid;
fid X(g)
101 10
102 20
103 20
104 10
SELECT fid, Y(g) FROM gis_point ORDER by fid;
fid Y(g)
101 10
102 10
103 20
104 20
explain extended select X(g),Y(g) FROM gis_point;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE gis_point ALL NULL NULL NULL NULL 4 100.00
Warnings:
Note 1003 select x(`test`.`gis_point`.`g`) AS `X(g)`,y(`test`.`gis_point`.`g`) AS `Y(g)` from `test`.`gis_point`
SELECT fid, AsText(StartPoint(g)) FROM gis_line ORDER by fid;
fid AsText(StartPoint(g))
105 POINT(0 0)
106 POINT(10 10)
107 POINT(10 10)
SELECT fid, AsText(EndPoint(g)) FROM gis_line ORDER by fid;
fid AsText(EndPoint(g))
105 POINT(10 0)
106 POINT(10 10)
107 POINT(40 10)
SELECT fid, GLength(g) FROM gis_line ORDER by fid;
fid GLength(g)
105 24.142135623731
106 40
107 30
SELECT fid, NumPoints(g) FROM gis_line ORDER by fid;
fid NumPoints(g)
105 3
106 5
107 2
SELECT fid, AsText(PointN(g, 2)) FROM gis_line ORDER by fid;
fid AsText(PointN(g, 2))
105 POINT(0 10)
106 POINT(20 10)
107 POINT(40 10)
SELECT fid, IsClosed(g) FROM gis_line ORDER by fid;
fid IsClosed(g)
105 0
106 1
107 0
explain extended select AsText(StartPoint(g)),AsText(EndPoint(g)),GLength(g),NumPoints(g),AsText(PointN(g, 2)),IsClosed(g) FROM gis_line;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE gis_line ALL NULL NULL NULL NULL 3 100.00
Warnings:
Note 1003 select astext(startpoint(`test`.`gis_line`.`g`)) AS `AsText(StartPoint(g))`,astext(endpoint(`test`.`gis_line`.`g`)) AS `AsText(EndPoint(g))`,glength(`test`.`gis_line`.`g`) AS `GLength(g)`,numpoints(`test`.`gis_line`.`g`) AS `NumPoints(g)`,astext(pointn(`test`.`gis_line`.`g`,2)) AS `AsText(PointN(g, 2))`,isclosed(`test`.`gis_line`.`g`) AS `IsClosed(g)` from `test`.`gis_line`
SELECT fid, AsText(Centroid(g)) FROM gis_polygon ORDER by fid;
fid AsText(Centroid(g))
108 POINT(15 15)
109 POINT(25.416666666667 25.416666666667)
110 POINT(20 10)
SELECT fid, Area(g) FROM gis_polygon ORDER by fid;
fid Area(g)
108 100
109 2400
110 450
SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon ORDER by fid;
fid AsText(ExteriorRing(g))
108 LINESTRING(10 10,20 10,20 20,10 20,10 10)
109 LINESTRING(0 0,50 0,50 50,0 50,0 0)
110 LINESTRING(0 0,30 0,30 30,0 0)
SELECT fid, NumInteriorRings(g) FROM gis_polygon ORDER by fid;
fid NumInteriorRings(g)
108 0
109 1
110 0
SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon ORDER by fid;
fid AsText(InteriorRingN(g, 1))
108 NULL
109 LINESTRING(10 10,20 10,20 20,10 20,10 10)
110 NULL
explain extended select AsText(Centroid(g)),Area(g),AsText(ExteriorRing(g)),NumInteriorRings(g),AsText(InteriorRingN(g, 1)) FROM gis_polygon;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE gis_polygon ALL NULL NULL NULL NULL 3 100.00
Warnings:
Note 1003 select astext(centroid(`test`.`gis_polygon`.`g`)) AS `AsText(Centroid(g))`,area(`test`.`gis_polygon`.`g`) AS `Area(g)`,astext(exteriorring(`test`.`gis_polygon`.`g`)) AS `AsText(ExteriorRing(g))`,numinteriorrings(`test`.`gis_polygon`.`g`) AS `NumInteriorRings(g)`,astext(interiorringn(`test`.`gis_polygon`.`g`,1)) AS `AsText(InteriorRingN(g, 1))` from `test`.`gis_polygon`
SELECT fid, IsClosed(g) FROM gis_multi_line ORDER by fid;
fid IsClosed(g)
114 0
115 0
116 0
SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon ORDER by fid;
fid AsText(Centroid(g))
117 POINT(55.588527753042 17.426536064114)
118 POINT(55.588527753042 17.426536064114)
119 POINT(2 2)
SELECT fid, Area(g) FROM gis_multi_polygon ORDER by fid;
fid Area(g)
117 1684.5
118 1684.5
119 4.5
SELECT fid, NumGeometries(g) from gis_multi_point ORDER by fid;
fid NumGeometries(g)
111 4
112 4
113 2
SELECT fid, NumGeometries(g) from gis_multi_line ORDER by fid;
fid NumGeometries(g)
114 2
115 1
116 2
SELECT fid, NumGeometries(g) from gis_multi_polygon ORDER by fid;
fid NumGeometries(g)
117 2
118 2
119 1
SELECT fid, NumGeometries(g) from gis_geometrycollection ORDER by fid;
fid NumGeometries(g)
120 2
121 2
explain extended SELECT fid, NumGeometries(g) from gis_multi_point;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE gis_multi_point ALL NULL NULL NULL NULL 3 100.00
Warnings:
Note 1003 select `test`.`gis_multi_point`.`fid` AS `fid`,numgeometries(`test`.`gis_multi_point`.`g`) AS `NumGeometries(g)` from `test`.`gis_multi_point`
SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point ORDER by fid;
fid AsText(GeometryN(g, 2))
111 POINT(10 10)
112 POINT(11 11)
113 POINT(4 10)
SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line ORDER by fid;
fid AsText(GeometryN(g, 2))
114 LINESTRING(16 0,16 23,16 48)
115 NULL
116 LINESTRING(2 5,5 8,21 7)
SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon ORDER by fid;
fid AsText(GeometryN(g, 2))
117 POLYGON((59 18,67 18,67 13,59 13,59 18))
118 POLYGON((59 18,67 18,67 13,59 13,59 18))
119 NULL
SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection ORDER by fid;
fid AsText(GeometryN(g, 2))
120 LINESTRING(0 0,10 10)
121 LINESTRING(3 6,7 9)
SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection ORDER by fid;
fid AsText(GeometryN(g, 1))
120 POINT(0 0)
121 POINT(44 6)
explain extended SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE gis_multi_point ALL NULL NULL NULL NULL 3 100.00
Warnings:
Note 1003 select `test`.`gis_multi_point`.`fid` AS `fid`,astext(geometryn(`test`.`gis_multi_point`.`g`,2)) AS `AsText(GeometryN(g, 2))` from `test`.`gis_multi_point`
SELECT g1.fid as first, g2.fid as second,
Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
first second w c o e d t i r
120 120 1 1 0 1 0 0 1 0
120 121 0 0 0 0 0 0 1 0
121 120 0 0 1 0 0 0 1 0
121 121 1 1 0 1 0 0 1 0
explain extended SELECT g1.fid as first, g2.fid as second,
Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
id select_type table type possible_keys key key_len ref rows filtered Extra
1 SIMPLE g1 ALL NULL NULL NULL NULL 2 100.00 Using temporary; Using filesort
1 SIMPLE g2 ALL NULL NULL NULL NULL 2 100.00
Warnings:
Note 1003 select `test`.`g1`.`fid` AS `first`,`test`.`g2`.`fid` AS `second`,within(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `w`,contains(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `c`,overlaps(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `o`,equals(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `e`,disjoint(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `d`,touches(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `t`,intersects(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `i`,crosses(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `r` from `test`.`gis_geometrycollection` `g1` join `test`.`gis_geometrycollection` `g2` order by `test`.`g1`.`fid`,`test`.`g2`.`fid`
DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
CREATE TABLE t1 (
a INTEGER PRIMARY KEY AUTO_INCREMENT,
gp point,
ln linestring,
pg polygon,
mp multipoint,
mln multilinestring,
mpg multipolygon,
gc geometrycollection,
gm geometry
);
SHOW FIELDS FROM t1;
Field Type Null Key Default Extra
a int(11) NO PRI NULL auto_increment
gp point YES NULL
ln linestring YES NULL
pg polygon YES NULL
mp multipoint YES NULL
mln multilinestring YES NULL
mpg multipolygon YES NULL
gc geometrycollection YES NULL
gm geometry YES NULL
ALTER TABLE t1 ADD fid INT;
SHOW FIELDS FROM t1;
Field Type Null Key Default Extra
a int(11) NO PRI NULL auto_increment
gp point YES NULL
ln linestring YES NULL
pg polygon YES NULL
mp multipoint YES NULL
mln multilinestring YES NULL
mpg multipolygon YES NULL
gc geometrycollection YES NULL
gm geometry YES NULL
fid int(11) YES NULL
DROP TABLE t1;
create table t1 (pk integer primary key auto_increment, a geometry not null);
insert into t1 (a) values (GeomFromText('Point(1 2)'));
insert into t1 (a) values ('Garbage');
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
insert IGNORE into t1 (a) values ('Garbage');
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
drop table t1;
create table t1 (pk integer primary key auto_increment, fl geometry);
insert into t1 (fl) values (1);
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
insert into t1 (fl) values (1.11);
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
insert into t1 (fl) values ("qwerty");
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
insert into t1 (fl) values (pointfromtext('point(1,1)'));
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
drop table t1;

View File

@ -1,6 +1,6 @@
drop table if exists t1, t2;
reset master;
-create table t1 (a int) engine=bdb;
+create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
begin;
insert t1 values (5);
@ -10,12 +10,12 @@ insert t2 values (5);
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=bdb
+master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=innodb
master-bin.000001 # Query 1 # use `test`; create table t2 (a int) engine=innodb
master-bin.000001 # Query 1 # use `test`; BEGIN
master-bin.000001 # Table_map 1 # table_id: # (test.t1)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
-master-bin.000001 # Query 1 # use `test`; COMMIT
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
master-bin.000001 # Query 1 # use `test`; BEGIN
master-bin.000001 # Table_map 1 # table_id: # (test.t2)
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F

View File

@ -12,7 +12,7 @@ master-bin.000001 367 Xid 1 394 COMMIT /* XID */
drop table t1;
drop table if exists t1, t2;
reset master;
-create table t1 (a int) engine=bdb;
+create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
begin;
insert t1 values (5);
@ -22,11 +22,11 @@ insert t2 values (5);
commit;
show binlog events from 102;
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=bdb
+master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=innodb
master-bin.000001 # Query 1 # use `test`; create table t2 (a int) engine=innodb
master-bin.000001 # Query 1 # use `test`; BEGIN
master-bin.000001 # Query 1 # use `test`; insert t1 values (5)
-master-bin.000001 # Query 1 # use `test`; COMMIT
+master-bin.000001 # Xid 1 # COMMIT /* xid= */
master-bin.000001 # Query 1 # use `test`; BEGIN
master-bin.000001 # Query 1 # use `test`; insert t2 values (5)
master-bin.000001 # Xid 1 # COMMIT /* xid= */

View File

@ -479,7 +479,7 @@ drop table t1;
create table t1 (
c char(10) character set utf8,
unique key a (c(1))
-) engine=bdb;
+) engine=innodb;
insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
insert into t1 values ('aa');
ERROR 23000: Duplicate entry 'aa' for key 'a'
@ -637,7 +637,7 @@ drop table t1;
create table t1 (
c char(10) character set utf8 collate utf8_bin,
unique key a (c(1))
-) engine=bdb;
+) engine=innodb;
insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
insert into t1 values ('aa');
ERROR 23000: Duplicate entry 'aa' for key 'a'
@ -707,7 +707,7 @@ drop table t1;
create table t1 (
str varchar(255) character set utf8 not null,
key str (str(2))
-) engine=bdb;
+) engine=innodb;
INSERT INTO t1 VALUES ('str');
INSERT INTO t1 VALUES ('str2');
select * from t1 where str='str';
@ -796,7 +796,7 @@ insert into t1 values(1,'foo'),(2,'foobar');
select * from t1 where b like 'foob%';
a b
2 foobar
-alter table t1 engine=bdb;
+alter table t1 engine=innodb;
select * from t1 where b like 'foob%';
a b
2 foobar

View File

@ -1,2 +0,0 @@
Variable_name Value
have_bdb YES

View File

@ -43,7 +43,6 @@ character-sets-dir option_value
basedir option_value
skip-stack-trace option_value
skip-innodb option_value
-skip-bdb option_value
skip-ndbcluster option_value
nonguarded option_value
log-output option_value
@ -64,7 +63,6 @@ character-sets-dir option_value
basedir option_value
skip-stack-trace option_value
skip-innodb option_value
-skip-bdb option_value
skip-ndbcluster option_value
nonguarded option_value
log-output option_value

View File

@ -22,7 +22,6 @@ basedir VALUE
server_id VALUE
skip-stack-trace VALUE
skip-innodb VALUE
-skip-bdb VALUE
skip-ndbcluster VALUE
log-output VALUE
SHOW INSTANCE OPTIONS mysqld2;
@ -41,7 +40,6 @@ basedir VALUE
server_id VALUE
skip-stack-trace VALUE
skip-innodb VALUE
-skip-bdb VALUE
skip-ndbcluster VALUE
nonguarded VALUE
log-output VALUE

View File

@ -1,136 +0,0 @@
drop table if exists t1;
create table t1 (
pk int primary key,
key1 int,
key2 int,
filler char(200),
filler2 char(200),
index(key1),
index(key2)
) engine=bdb;
select * from t1 where (key1 >= 2 and key1 <= 10) or (pk >= 4 and pk <=8 );
pk key1 key2 filler filler2
2 2 2 filler-data filler-data-2
3 3 3 filler-data filler-data-2
9 9 9 filler-data filler-data-2
10 10 10 filler-data filler-data-2
4 4 4 filler-data filler-data-2
5 5 5 filler-data filler-data-2
6 6 6 filler-data filler-data-2
7 7 7 filler-data filler-data-2
8 8 8 filler-data filler-data-2
set @maxv=1000;
select * from t1 where
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
or key1=18 or key1=60;
pk key1 key2 filler filler2
18 18 18 filler-data filler-data-2
60 60 60 filler-data filler-data-2
1 1 1 filler-data filler-data-2
2 2 2 filler-data filler-data-2
3 3 3 filler-data filler-data-2
4 4 4 filler-data filler-data-2
11 11 11 filler-data filler-data-2
12 12 12 filler-data filler-data-2
13 13 13 filler-data filler-data-2
14 14 14 filler-data filler-data-2
50 50 50 filler-data filler-data-2
51 51 51 filler-data filler-data-2
52 52 52 filler-data filler-data-2
53 53 53 filler-data filler-data-2
54 54 54 filler-data filler-data-2
991 991 991 filler-data filler-data-2
992 992 992 filler-data filler-data-2
993 993 993 filler-data filler-data-2
994 994 994 filler-data filler-data-2
995 995 995 filler-data filler-data-2
996 996 996 filler-data filler-data-2
997 997 997 filler-data filler-data-2
998 998 998 filler-data filler-data-2
999 999 999 filler-data filler-data-2
1000 1000 1000 filler-data filler-data-2
select * from t1 where
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
or key1 < 3 or key1 > @maxv-11;
pk key1 key2 filler filler2
990 990 990 filler-data filler-data-2
1 1 1 filler-data filler-data-2
2 2 2 filler-data filler-data-2
3 3 3 filler-data filler-data-2
4 4 4 filler-data filler-data-2
11 11 11 filler-data filler-data-2
12 12 12 filler-data filler-data-2
13 13 13 filler-data filler-data-2
14 14 14 filler-data filler-data-2
50 50 50 filler-data filler-data-2
51 51 51 filler-data filler-data-2
52 52 52 filler-data filler-data-2
53 53 53 filler-data filler-data-2
54 54 54 filler-data filler-data-2
991 991 991 filler-data filler-data-2
992 992 992 filler-data filler-data-2
993 993 993 filler-data filler-data-2
994 994 994 filler-data filler-data-2
995 995 995 filler-data filler-data-2
996 996 996 filler-data filler-data-2
997 997 997 filler-data filler-data-2
998 998 998 filler-data filler-data-2
999 999 999 filler-data filler-data-2
1000 1000 1000 filler-data filler-data-2
select * from t1 where
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
or
(key1 < 5) or (key1 > 10 and key1 < 15) or (key1 >= 50 and key1 < 55 ) or (key1 > @maxv-10);
pk key1 key2 filler filler2
1 1 1 filler-data filler-data-2
2 2 2 filler-data filler-data-2
3 3 3 filler-data filler-data-2
4 4 4 filler-data filler-data-2
11 11 11 filler-data filler-data-2
12 12 12 filler-data filler-data-2
13 13 13 filler-data filler-data-2
14 14 14 filler-data filler-data-2
50 50 50 filler-data filler-data-2
51 51 51 filler-data filler-data-2
52 52 52 filler-data filler-data-2
53 53 53 filler-data filler-data-2
54 54 54 filler-data filler-data-2
991 991 991 filler-data filler-data-2
992 992 992 filler-data filler-data-2
993 993 993 filler-data filler-data-2
994 994 994 filler-data filler-data-2
995 995 995 filler-data filler-data-2
996 996 996 filler-data filler-data-2
997 997 997 filler-data filler-data-2
998 998 998 filler-data filler-data-2
999 999 999 filler-data filler-data-2
1000 1000 1000 filler-data filler-data-2
select * from t1 where
(pk > 10 and pk < 15) or (pk >= 50 and pk < 55 )
or
(key1 < 5) or (key1 > @maxv-10);
pk key1 key2 filler filler2
1 1 1 filler-data filler-data-2
2 2 2 filler-data filler-data-2
3 3 3 filler-data filler-data-2
4 4 4 filler-data filler-data-2
991 991 991 filler-data filler-data-2
992 992 992 filler-data filler-data-2
993 993 993 filler-data filler-data-2
994 994 994 filler-data filler-data-2
995 995 995 filler-data filler-data-2
996 996 996 filler-data filler-data-2
997 997 997 filler-data filler-data-2
998 998 998 filler-data filler-data-2
999 999 999 filler-data filler-data-2
1000 1000 1000 filler-data filler-data-2
11 11 11 filler-data filler-data-2
12 12 12 filler-data filler-data-2
13 13 13 filler-data filler-data-2
14 14 14 filler-data filler-data-2
50 50 50 filler-data filler-data-2
51 51 51 filler-data filler-data-2
52 52 52 filler-data filler-data-2
53 53 53 filler-data filler-data-2
54 54 54 filler-data filler-data-2
drop table t1;

View File

@ -29,13 +29,13 @@ on (mysql.general_log.command_type = join_test.command_type)
drop table join_test;
flush logs;
lock tables mysql.general_log WRITE;
-ERROR HY000: You can't write-lock a log table. Only read access is possible.
+ERROR HY000: You can't write-lock a log table. Only read access is possible
lock tables mysql.slow_log WRITE;
-ERROR HY000: You can't write-lock a log table. Only read access is possible.
+ERROR HY000: You can't write-lock a log table. Only read access is possible
lock tables mysql.general_log READ;
-ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
+ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead
lock tables mysql.slow_log READ;
-ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
+ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead
lock tables mysql.slow_log READ LOCAL, mysql.general_log READ LOCAL;
unlock tables;
lock tables mysql.general_log READ LOCAL;
@ -161,13 +161,13 @@ TIMESTAMP USER_HOST THREAD_ID 1 Query set global slow_query_log='ON'
TIMESTAMP USER_HOST THREAD_ID 1 Query select * from mysql.general_log
flush logs;
lock tables mysql.general_log WRITE;
-ERROR HY000: You can't write-lock a log table. Only read access is possible.
+ERROR HY000: You can't write-lock a log table. Only read access is possible
lock tables mysql.slow_log WRITE;
-ERROR HY000: You can't write-lock a log table. Only read access is possible.
+ERROR HY000: You can't write-lock a log table. Only read access is possible
lock tables mysql.general_log READ;
-ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
+ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead
lock tables mysql.slow_log READ;
-ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
+ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead
lock tables mysql.slow_log READ LOCAL, mysql.general_log READ LOCAL;
unlock tables;
set global general_log='OFF';

View File

@ -492,7 +492,7 @@ create table t2 like t1;
insert into t2 select * from t1;
delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b;
drop table t1,t2;
-create table t1 ( c char(8) not null ) engine=bdb;
+create table t1 ( c char(8) not null ) engine=innodb;
insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9');
insert into t1 values ('A'),('B'),('C'),('D'),('E'),('F');
alter table t1 add b char(8) not null;

File diff suppressed because it is too large

View File

@ -1,186 +0,0 @@
drop table if exists t1, t2, t3,t4;
create table t1 (
pk1 int not NULL,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values (-5, 1, 1),
(-100, 1, 1),
(3, 1, 1),
(0, 1, 1),
(10, 1, 1);
explain select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 5 Using sort_union(key1,key2); Using where
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
pk1 key1 key2
-100 1 1
-5 1 1
0 1 1
3 1 1
10 1 1
drop table t1;
create table t1 (
pk1 int unsigned not NULL,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values (0, 1, 1),
(0xFFFFFFFF, 1, 1),
(0xFFFFFFFE, 1, 1),
(1, 1, 1),
(2, 1, 1);
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
pk1 key1 key2
0 1 1
1 1 1
2 1 1
4294967294 1 1
4294967295 1 1
drop table t1;
create table t1 (
pk1 char(4) not NULL,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb collate latin2_general_ci;
insert into t1 values ('a1', 1, 1),
('b2', 1, 1),
('A3', 1, 1),
('B4', 1, 1);
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
pk1 key1 key2
a1 1 1
A3 1 1
b2 1 1
B4 1 1
drop table t1;
create table t1 (
pk1 int not NULL,
pk2 char(4) not NULL collate latin1_german1_ci,
pk3 char(4) not NULL collate latin1_bin,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1,pk2,pk3),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values
(1, 'u', 'u', 1, 1),
(1, 'u', char(0xEC), 1, 1),
(1, 'u', 'x', 1, 1);
insert ignore into t1 select pk1, char(0xEC), pk3, key1, key2 from t1;
insert ignore into t1 select pk1, 'x', pk3, key1, key2 from t1 where pk2='u';
insert ignore into t1 select 2, pk2, pk3, key1, key2 from t1;
select * from t1;
pk1 pk2 pk3 key1 key2
1 <09> u 1 1
1 <09> x 1 1
1 <09> <09> 1 1
1 u u 1 1
1 u x 1 1
1 u <09> 1 1
1 x u 1 1
1 x x 1 1
1 x <09> 1 1
2 <09> u 1 1
2 <09> x 1 1
2 <09> <09> 1 1
2 u u 1 1
2 u x 1 1
2 u <09> 1 1
2 x u 1 1
2 x x 1 1
2 x <09> 1 1
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
pk1 pk2 pk3 key1 key2
1 <09> u 1 1
1 <09> x 1 1
1 <09> <09> 1 1
1 u u 1 1
1 u x 1 1
1 u <09> 1 1
1 x u 1 1
1 x x 1 1
1 x <09> 1 1
2 <09> u 1 1
2 <09> x 1 1
2 <09> <09> 1 1
2 u u 1 1
2 u x 1 1
2 u <09> 1 1
2 x u 1 1
2 x x 1 1
2 x <09> 1 1
alter table t1 drop primary key;
select * from t1;
pk1 pk2 pk3 key1 key2
1 <09> u 1 1
1 <09> x 1 1
1 <09> <09> 1 1
1 u u 1 1
1 u x 1 1
1 u <09> 1 1
1 x u 1 1
1 x x 1 1
1 x <09> 1 1
2 <09> u 1 1
2 <09> x 1 1
2 <09> <09> 1 1
2 u u 1 1
2 u x 1 1
2 u <09> 1 1
2 x u 1 1
2 x x 1 1
2 x <09> 1 1
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
pk1 pk2 pk3 key1 key2
1 <09> u 1 1
1 <09> x 1 1
1 <09> <09> 1 1
1 u u 1 1
1 u x 1 1
1 u <09> 1 1
1 x u 1 1
1 x x 1 1
1 x <09> 1 1
2 <09> u 1 1
2 <09> x 1 1
2 <09> <09> 1 1
2 u u 1 1
2 u x 1 1
2 u <09> 1 1
2 x u 1 1
2 x x 1 1
2 x <09> 1 1
drop table t1;
create table t1 (
pk1 varchar(8) NOT NULL default '',
pk2 varchar(4) NOT NULL default '',
key1 int(11),
key2 int(11),
primary key(pk1, pk2),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values ('','empt',2,2),
('a','a--a',2,2),
('bb','b--b',2,2),
('ccc','c--c',2,2),
('dddd','d--d',2,2);
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
pk1 pk2 key1 key2
empt 2 2
a a--a 2 2
bb b--b 2 2
ccc c--c 2 2
dddd d--d 2 2
drop table t1;

View File

@ -692,7 +692,7 @@ drop database mysqltest;
show full plugin; show full plugin;
show warnings; show warnings;
Level Code Message Level Code Message
Warning 1541 The syntax 'SHOW PLUGIN' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW PLUGINS' instead. Warning 1541 The syntax 'SHOW PLUGIN' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW PLUGINS' instead
show plugin; show plugin;
show plugins; show plugins;
End of 5.1 tests End of 5.1 tests

View File

@ -535,7 +535,7 @@ use db_bug7787|
CREATE PROCEDURE p1() CREATE PROCEDURE p1()
SHOW INNODB STATUS; | SHOW INNODB STATUS; |
Warnings: Warnings:
Warning 1541 The syntax 'SHOW INNODB STATUS' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW ENGINE INNODB STATUS' instead. Warning 1541 The syntax 'SHOW INNODB STATUS' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW ENGINE INNODB STATUS' instead
GRANT EXECUTE ON PROCEDURE p1 TO user_bug7787@localhost| GRANT EXECUTE ON PROCEDURE p1 TO user_bug7787@localhost|
DROP DATABASE db_bug7787| DROP DATABASE db_bug7787|
drop user user_bug7787@localhost| drop user user_bug7787@localhost|

View File

@ -101,13 +101,13 @@ create table t1 (t2 timestamp(2), t4 timestamp(4), t6 timestamp(6),
t8 timestamp(8), t10 timestamp(10), t12 timestamp(12), t8 timestamp(8), t10 timestamp(10), t12 timestamp(12),
t14 timestamp(14)); t14 timestamp(14));
Warnings: Warnings:
Warning 1541 The syntax 'TIMESTAMP(2)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. Warning 1541 The syntax 'TIMESTAMP(2)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
Warning 1541 The syntax 'TIMESTAMP(4)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. Warning 1541 The syntax 'TIMESTAMP(4)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
Warning 1541 The syntax 'TIMESTAMP(6)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. Warning 1541 The syntax 'TIMESTAMP(6)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
Warning 1541 The syntax 'TIMESTAMP(8)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. Warning 1541 The syntax 'TIMESTAMP(8)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
Warning 1541 The syntax 'TIMESTAMP(10)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. Warning 1541 The syntax 'TIMESTAMP(10)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
Warning 1541 The syntax 'TIMESTAMP(12)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. Warning 1541 The syntax 'TIMESTAMP(12)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
Warning 1541 The syntax 'TIMESTAMP(14)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead. Warning 1541 The syntax 'TIMESTAMP(14)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
insert t1 values (0,0,0,0,0,0,0), insert t1 values (0,0,0,0,0,0,0),
("1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59", ("1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59",
"1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59",

View File

@ -175,7 +175,7 @@ Warning 1266 Using storage engine MyISAM for table 't1'
drop table t1; drop table t1;
set table_type=MYISAM; set table_type=MYISAM;
Warnings: Warnings:
Warning 1541 The syntax 'table_type' is deprecated and will be removed in MySQL 5.2. Please use 'storage_engine' instead. Warning 1541 The syntax 'table_type' is deprecated and will be removed in MySQL 5.2. Please use 'storage_engine' instead
create table t1 (a int); create table t1 (a int);
insert into t1 (a) values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); insert into t1 (a) values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
update t1 set a='abc'; update t1 set a='abc';

View File

@ -1,18 +0,0 @@
#
# Test of problem when shutting down mysqld at once after ALTER TABLE
#
-- source include/have_bdb.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1(objid BIGINT not null, tablename varchar(64), oid BIGINT not null, test BIGINT, PRIMARY KEY (objid), UNIQUE(tablename)) engine=BDB;
insert into t1 values(1, 't1',4,9);
insert into t1 values(2, 'metatable',1,9);
insert into t1 values(3, 'metaindex',1,9 );
select * from t1;
alter table t1 drop column test;
# Now we do a reboot and continue with the next test
# End of 4.1 tests

View File

@ -1,2 +0,0 @@
--skip-external-locking

View File

@ -1,10 +0,0 @@
#
# Note that this test uses tables from the previous test
# This is to test that the table t1 survives a reboot of MySQL
# The options in the -master.opt file are just there to force the reboot
#
-- source include/have_bdb.inc
select * from t1;
drop table t1;
# End of 4.1 tests

View File

@ -1,51 +0,0 @@
-- source include/have_bdb.inc
# test for bug reported by Mark Steele
--disable_warnings
drop table if exists t1;
--enable_warnings
CREATE TABLE t1 (
ChargeID int(10) unsigned NOT NULL auto_increment,
ServiceID int(10) unsigned DEFAULT '0' NOT NULL,
ChargeDate date DEFAULT '0000-00-00' NOT NULL,
ChargeAmount decimal(20,2) DEFAULT '0.00' NOT NULL,
FedTaxes decimal(20,2) DEFAULT '0.00' NOT NULL,
ProvTaxes decimal(20,2) DEFAULT '0.00' NOT NULL,
ChargeStatus enum('New','Auth','Unauth','Sale','Denied','Refund')
DEFAULT 'New' NOT NULL,
ChargeAuthorizationMessage text,
ChargeComment text,
ChargeTimeStamp varchar(20),
PRIMARY KEY (ChargeID),
KEY ServiceID (ServiceID),
KEY ChargeDate (ChargeDate)
) engine=BDB;
BEGIN;
INSERT INTO t1
VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now');
COMMIT;
BEGIN;
UPDATE t1 SET ChargeAuthorizationMessage = 'blablabla' WHERE
ChargeID = 1;
COMMIT;
INSERT INTO t1
VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now');
select * from t1;
drop table t1;
#
# Test for bug #2342 "Running ANALYZE TABLE on bdb table
# inside a transaction hangs server thread"
create table t1 (a int) engine=bdb;
set autocommit=0;
insert into t1 values(1);
analyze table t1;
drop table t1;
# End of 4.1 tests

View File

@ -1,59 +0,0 @@
# This test doesn't work with the embedded version as this code
# assumes that one query is running while we are doing queries on
# a second connection.
# This would work if mysqltest run would be threaded and handle each
# connection in a separate thread.
#
-- source include/not_embedded.inc
-- source include/have_bdb.inc
connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
--disable_warnings
drop table if exists t1,t2;
--enable_warnings
connection con1;
create table t1 (id integer, x integer) engine=BDB;
create table t2 (id integer, x integer) engine=BDB;
insert into t1 values(0, 0);
insert into t2 values(0, 0);
set autocommit=0;
update t1 set x = 1 where id = 0;
connection con2;
set autocommit=0;
update t2 set x = 1 where id = 0;
# The following query should hang because con1 is locking the page
--send
select x from t1 where id = 0;
connection con1;
# This should generate a deadlock as we are trying to access a locked row
--send
select x from t2 where id = 0;
connection con2;
--error 1213
reap;
commit;
connection con1;
reap;
commit;
connection con2;
select * from t1;
select * from t2;
commit;
connection con1;
select * from t1;
select * from t2;
commit;
drop table t1,t2;
# End of 4.1 tests

View File

@ -1,59 +0,0 @@
# This test doesn't work with the embedded version as this code
# assumes that one query is running while we are doing queries on
# a second connection.
# This would work if mysqltest run would be threaded and handle each
# connection in a separate thread.
#
#-- source include/not_embedded.inc
-- source include/have_bdb.inc
connect (con1,localhost,root,,);
connect (con2,localhost,root,,);
--disable_warnings
drop table if exists t1,t2;
--enable_warnings
connection con1;
create table t1 (id integer, x integer) engine=BDB;
create table t2 (id integer, x integer) engine=BDB;
insert into t1 values(0, 0);
insert into t2 values(0, 0);
set autocommit=0;
update t1 set x = 1 where id = 0;
connection con2;
set autocommit=0;
update t2 set x = 1 where id = 0;
# The following query should hang because con1 is locking the page
--send
select x from t1 where id = 0;
connection con1;
# This should generate a deadlock as we are trying to access a locked row
--send
select x from t2 where id = 0;
connection con2;
--error 1213
reap;
commit;
connection con1;
reap;
commit;
connection con2;
select * from t1;
select * from t2;
commit;
connection con1;
select * from t1;
select * from t2;
commit;
drop table t1,t2;
# End of 4.1 tests

File diff suppressed because it is too large

View File

@ -1 +0,0 @@
--set-variable=query_cache_size=1M

View File

@ -1,53 +0,0 @@
-- source include/have_bdb.inc
-- source include/have_query_cache.inc
#
# Without auto_commit.
#
--disable_warnings
drop table if exists t1, t2, t3;
--enable_warnings
flush status;
set autocommit=0;
create table t1 (a int not null) engine=bdb;
insert into t1 values (1),(2),(3);
select * from t1;
show status like "Qcache_queries_in_cache";
drop table t1;
set autocommit=1;
create table t1 (a int not null) engine=bdb;
begin;
insert into t1 values (1),(2),(3);
select * from t1;
show status like "Qcache_queries_in_cache";
drop table t1;
create table t1 (a int not null) engine=bdb;
create table t2 (a int not null) engine=bdb;
create table t3 (a int not null) engine=bdb;
insert into t1 values (1),(2);
insert into t2 values (1),(2);
insert into t3 values (1),(2);
select * from t1;
select * from t2;
select * from t3;
show status like "Qcache_queries_in_cache";
show status like "Qcache_hits";
begin;
select * from t1;
select * from t2;
select * from t3;
show status like "Qcache_queries_in_cache";
show status like "Qcache_hits";
insert into t1 values (3);
insert into t2 values (3);
insert into t1 values (4);
select * from t1;
select * from t2;
select * from t3;
show status like "Qcache_queries_in_cache";
show status like "Qcache_hits";
commit;
show status like "Qcache_queries_in_cache";
drop table if exists t1, t2, t3;
# End of 4.1 tests

View File

@ -1,3 +0,0 @@
-- source include/have_bdb.inc
SET storage_engine=bdb;
--source include/gis_generic.inc

View File

@ -360,7 +360,7 @@ drop table t1;
create table t1 ( create table t1 (
c char(10) character set utf8, c char(10) character set utf8,
unique key a (c(1)) unique key a (c(1))
) engine=bdb; ) engine=innodb;
--enable_warnings --enable_warnings
insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f'); insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
--error 1062 --error 1062
@ -483,7 +483,7 @@ drop table t1;
create table t1 ( create table t1 (
c char(10) character set utf8 collate utf8_bin, c char(10) character set utf8 collate utf8_bin,
unique key a (c(1)) unique key a (c(1))
) engine=bdb; ) engine=innodb;
--enable_warnings --enable_warnings
insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f'); insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
--error 1062 --error 1062
@ -558,7 +558,7 @@ drop table t1;
create table t1 ( create table t1 (
str varchar(255) character set utf8 not null, str varchar(255) character set utf8 not null,
key str (str(2)) key str (str(2))
) engine=bdb; ) engine=innodb;
--enable_warnings --enable_warnings
INSERT INTO t1 VALUES ('str'); INSERT INTO t1 VALUES ('str');
INSERT INTO t1 VALUES ('str2'); INSERT INTO t1 VALUES ('str2');
@ -644,7 +644,7 @@ create table t1 (
insert into t1 values(1,'foo'),(2,'foobar'); insert into t1 values(1,'foo'),(2,'foobar');
select * from t1 where b like 'foob%'; select * from t1 where b like 'foob%';
--disable_warnings --disable_warnings
alter table t1 engine=bdb; alter table t1 engine=innodb;
--enable_warnings --enable_warnings
select * from t1 where b like 'foob%'; select * from t1 where b like 'foob%';
drop table t1; drop table t1;

View File

@ -44,3 +44,4 @@ rpl_row_basic_7ndb : BUG#21298 2006-07-27 msvensson
rpl_truncate_7ndb : BUG#21298 2006-07-27 msvensson rpl_truncate_7ndb : BUG#21298 2006-07-27 msvensson
crash_commit_before : 2006-08-02 msvensson crash_commit_before : 2006-08-02 msvensson
rpl_ndb_dd_advance : BUG#18679 2006-07-28 jimw (Test fails randomly) rpl_ndb_dd_advance : BUG#18679 2006-07-28 jimw (Test fails randomly)
federated_transactions : Need to be re-enabled once Patrick's merge is complete

View File

@ -10,7 +10,7 @@ CREATE TABLE federated.t1 (
`id` int(20) NOT NULL, `id` int(20) NOT NULL,
`name` varchar(32) NOT NULL default '' `name` varchar(32) NOT NULL default ''
) )
DEFAULT CHARSET=latin1 ENGINE=InnoDB; DEFAULT CHARSET=latin1 ENGINE=innodb;
connection master; connection master;
DROP TABLE IF EXISTS federated.t1; DROP TABLE IF EXISTS federated.t1;

View File

@ -1,52 +0,0 @@
#
# 2-sweeps read Index_merge test
#
-- source include/have_bdb.inc
--disable_warnings
drop table if exists t1;
--enable_warnings
create table t1 (
pk int primary key,
key1 int,
key2 int,
filler char(200),
filler2 char(200),
index(key1),
index(key2)
) engine=bdb;
--disable_query_log
let $1=1000;
while ($1)
{
eval insert into t1 values($1, $1, $1, 'filler-data','filler-data-2');
dec $1;
}
--enable_query_log
select * from t1 where (key1 >= 2 and key1 <= 10) or (pk >= 4 and pk <=8 );
set @maxv=1000;
select * from t1 where
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
or key1=18 or key1=60;
select * from t1 where
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
or key1 < 3 or key1 > @maxv-11;
select * from t1 where
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
or
(key1 < 5) or (key1 > 10 and key1 < 15) or (key1 >= 50 and key1 < 55 ) or (key1 > @maxv-10);
select * from t1 where
(pk > 10 and pk < 15) or (pk >= 50 and pk < 55 )
or
(key1 < 5) or (key1 > @maxv-10);
drop table t1;

View File

@ -485,7 +485,7 @@ delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b;
drop table t1,t2; drop table t1,t2;
--disable_warnings --disable_warnings
create table t1 ( c char(8) not null ) engine=bdb; create table t1 ( c char(8) not null ) engine=innodb;
--enable_warnings --enable_warnings
insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9'); insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9');

View File

@ -1,25 +0,0 @@
###############################################
# #
# Prepared Statements test on BDB tables #
# #
###############################################
#
# NOTE: PLEASE SEE ps_1general.test (bottom)
# BEFORE ADDING NEW TEST CASES HERE !!!
use test;
-- source include/have_bdb.inc
let $type= 'BDB' ;
-- source include/ps_create.inc
-- source include/ps_renew.inc
-- source include/ps_query.inc
-- source include/ps_modify.inc
-- source include/ps_modify1.inc
-- source include/ps_conv.inc
drop table t1, t9;
# End of 4.1 tests

View File

@ -1,108 +0,0 @@
#
# Test for rowid ordering (and comparison) functions.
# do index_merge select for tables with PK of various types.
#
--disable_warnings
drop table if exists t1, t2, t3,t4;
--enable_warnings
-- source include/have_bdb.inc
# Signed number as rowid
create table t1 (
pk1 int not NULL,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values (-5, 1, 1),
(-100, 1, 1),
(3, 1, 1),
(0, 1, 1),
(10, 1, 1);
explain select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
drop table t1;
# Unsigned numbers as rowids
create table t1 (
pk1 int unsigned not NULL,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values (0, 1, 1),
(0xFFFFFFFF, 1, 1),
(0xFFFFFFFE, 1, 1),
(1, 1, 1),
(2, 1, 1);
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
drop table t1;
# Case-insensitive char(N)
create table t1 (
pk1 char(4) not NULL,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb collate latin2_general_ci;
insert into t1 values ('a1', 1, 1),
('b2', 1, 1),
('A3', 1, 1),
('B4', 1, 1);
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
drop table t1;
# Multi-part PK
create table t1 (
pk1 int not NULL,
pk2 char(4) not NULL collate latin1_german1_ci,
pk3 char(4) not NULL collate latin1_bin,
key1 int(11),
key2 int(11),
PRIMARY KEY (pk1,pk2,pk3),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values
(1, 'u', 'u', 1, 1),
(1, 'u', char(0xEC), 1, 1),
(1, 'u', 'x', 1, 1);
insert ignore into t1 select pk1, char(0xEC), pk3, key1, key2 from t1;
insert ignore into t1 select pk1, 'x', pk3, key1, key2 from t1 where pk2='u';
insert ignore into t1 select 2, pk2, pk3, key1, key2 from t1;
select * from t1;
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
# Hidden PK
alter table t1 drop primary key;
select * from t1;
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
drop table t1;
# Variable-length PK
# this is also test for Bug#2688
create table t1 (
pk1 varchar(8) NOT NULL default '',
pk2 varchar(4) NOT NULL default '',
key1 int(11),
key2 int(11),
primary key(pk1, pk2),
KEY key1 (key1),
KEY key2 (key2)
) engine=bdb;
insert into t1 values ('','empt',2,2),
('a','a--a',2,2),
('bb','b--b',2,2),
('ccc','c--c',2,2),
('dddd','d--d',2,2);
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
drop table t1;

View File

@ -42,7 +42,7 @@ base64_needed_encoded_length(int length_of_data)
int int
base64_needed_decoded_length(int length_of_encoded_data) base64_needed_decoded_length(int length_of_encoded_data)
{ {
return ceil(length_of_encoded_data * 3 / 4); return (int)ceil(length_of_encoded_data * 3 / 4);
} }

View File

@ -46,7 +46,7 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset,
before seeking to the given offset before seeking to the given offset
*/ */
error= (old_offset= lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L || error= (old_offset= (off_t)lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L ||
lseek(Filedes, offset, MY_SEEK_SET) == -1L; lseek(Filedes, offset, MY_SEEK_SET) == -1L;
if (!error) /* Seek was successful */ if (!error) /* Seek was successful */
@ -121,7 +121,7 @@ uint my_pwrite(int Filedes, const byte *Buffer, uint Count, my_off_t offset,
As we cannot change the file pointer, we save the old position, As we cannot change the file pointer, we save the old position,
before seeking to the given offset before seeking to the given offset
*/ */
error= ((old_offset= lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L || error= ((old_offset= (off_t)lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L ||
lseek(Filedes, offset, MY_SEEK_SET) == -1L); lseek(Filedes, offset, MY_SEEK_SET) == -1L);
if (!error) /* Seek was successful */ if (!error) /* Seek was successful */

View File

@ -361,7 +361,6 @@ int mysql_install_db(int argc, char *argv[])
add_arg(&al, "--bootstrap"); add_arg(&al, "--bootstrap");
add_arg(&al, "--skip-grant-tables"); add_arg(&al, "--skip-grant-tables");
add_arg(&al, "--skip-innodb"); add_arg(&al, "--skip-innodb");
add_arg(&al, "--skip-bdb");
// spawn mysqld // spawn mysqld
err = spawn(mysqld, &al, TRUE, sql_file, out_log, err_log); err = spawn(mysqld, &al, TRUE, sql_file, out_log, err_log);

View File

@ -210,7 +210,6 @@ void install_db(char *datadir)
add_arg(&al, "--basedir=%s", base_dir); add_arg(&al, "--basedir=%s", base_dir);
add_arg(&al, "--datadir=%s", datadir); add_arg(&al, "--datadir=%s", datadir);
add_arg(&al, "--skip-innodb"); add_arg(&al, "--skip-innodb");
add_arg(&al, "--skip-bdb");
// spawn // spawn
if ((err = spawn(mysqld_file, &al, TRUE, input, output, error)) != 0) if ((err = spawn(mysqld_file, &al, TRUE, input, output, error)) != 0)

View File

@ -212,7 +212,7 @@ then
fi fi
mysqld_install_cmd_line="$mysqld $defaults $mysqld_opt --bootstrap \ mysqld_install_cmd_line="$mysqld $defaults $mysqld_opt --bootstrap \
--skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb \ --skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb \
--skip-bdb --skip-ndbcluster $args --max_allowed_packet=8M --net_buffer_length=16K" --skip-ndbcluster $args --max_allowed_packet=8M --net_buffer_length=16K"
if $scriptdir/mysql_create_system_tables $create_option $mdata $hostname $windows \ if $scriptdir/mysql_create_system_tables $create_option $mdata $hostname $windows \
| eval "$mysqld_install_cmd_line" | eval "$mysqld_install_cmd_line"
then then

View File

@ -429,7 +429,7 @@ str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time,
goto err; goto err;
} }
if (check_date(l_time, not_zero_date, flags, was_cut)) if ((my_bool)check_date(l_time, not_zero_date, flags, was_cut))
goto err; goto err;
l_time->time_type= (number_of_fields <= 3 ? l_time->time_type= (number_of_fields <= 3 ?

View File

@ -8,8 +8,7 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
${CMAKE_SOURCE_DIR}/sql ${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex ${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/zlib ${CMAKE_SOURCE_DIR}/zlib
${CMAKE_SOURCE_DIR}/storage/bdb/build_win32 )
${CMAKE_SOURCE_DIR}/storage/bdb/dbinc)
SET_SOURCE_FILES_PROPERTIES(${CMAKE_SOURCE_DIR}/sql/message.rc SET_SOURCE_FILES_PROPERTIES(${CMAKE_SOURCE_DIR}/sql/message.rc
${CMAKE_SOURCE_DIR}/sql/message.h ${CMAKE_SOURCE_DIR}/sql/message.h
@ -29,7 +28,7 @@ ADD_DEFINITIONS(-DHAVE_ROW_BASED_REPLICATION -DMYSQL_SERVER
ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc
discover.cc ../libmysql/errmsg.c field.cc field_conv.cc discover.cc ../libmysql/errmsg.c field.cc field_conv.cc
filesort.cc gstream.cc ha_heap.cc ha_myisam.cc ha_myisammrg.cc filesort.cc gstream.cc ha_heap.cc ha_myisam.cc ha_myisammrg.cc
ha_innodb.cc ha_partition.cc ha_federated.cc ha_berkeley.cc ha_innodb.cc ha_partition.cc ha_federated.cc
handler.cc hash_filo.cc hash_filo.h handler.cc hash_filo.cc hash_filo.h
hostname.cc init.cc item.cc item_buff.cc item_cmpfunc.cc hostname.cc init.cc item.cc item_buff.cc item_cmpfunc.cc
item_create.cc item_func.cc item_geofunc.cc item_row.cc item_create.cc item_func.cc item_geofunc.cc item_row.cc
@ -79,9 +78,6 @@ ENDIF(WITH_EXAMPLE_STORAGE_ENGINE)
IF(WITH_INNOBASE_STORAGE_ENGINE) IF(WITH_INNOBASE_STORAGE_ENGINE)
TARGET_LINK_LIBRARIES(mysqld innobase) TARGET_LINK_LIBRARIES(mysqld innobase)
ENDIF(WITH_INNOBASE_STORAGE_ENGINE) ENDIF(WITH_INNOBASE_STORAGE_ENGINE)
IF(WITH_BERKELEY_STORAGE_ENGINE)
TARGET_LINK_LIBRARIES(mysqld bdb)
ENDIF(WITH_BERKELEY_STORAGE_ENGINE)
ADD_DEPENDENCIES(mysqld GenError) ADD_DEPENDENCIES(mysqld GenError)

View File

@ -47,10 +47,10 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
item_create.h item_subselect.h item_row.h \ item_create.h item_subselect.h item_row.h \
mysql_priv.h item_geofunc.h sql_bitmap.h \ mysql_priv.h item_geofunc.h sql_bitmap.h \
procedure.h sql_class.h sql_lex.h sql_list.h \ procedure.h sql_class.h sql_lex.h sql_list.h \
sql_manager.h sql_map.h sql_string.h unireg.h \ sql_map.h sql_string.h unireg.h \
sql_error.h field.h handler.h mysqld_suffix.h \ sql_error.h field.h handler.h mysqld_suffix.h \
ha_heap.h ha_myisam.h ha_myisammrg.h ha_partition.h \ ha_heap.h ha_myisam.h ha_myisammrg.h ha_partition.h \
ha_innodb.h ha_berkeley.h ha_federated.h \ ha_innodb.h ha_federated.h \
ha_ndbcluster.h ha_ndbcluster_binlog.h \ ha_ndbcluster.h ha_ndbcluster_binlog.h \
ha_ndbcluster_tables.h \ ha_ndbcluster_tables.h \
opt_range.h protocol.h rpl_tblmap.h \ opt_range.h protocol.h rpl_tblmap.h \
@ -88,7 +88,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
discover.cc time.cc opt_range.cc opt_sum.cc \ discover.cc time.cc opt_range.cc opt_sum.cc \
records.cc filesort.cc handler.cc \ records.cc filesort.cc handler.cc \
ha_heap.cc ha_myisam.cc ha_myisammrg.cc \ ha_heap.cc ha_myisam.cc ha_myisammrg.cc \
ha_partition.cc ha_innodb.cc ha_berkeley.cc \ ha_partition.cc ha_innodb.cc \
ha_federated.cc \ ha_federated.cc \
ha_ndbcluster.cc ha_ndbcluster_binlog.cc \ ha_ndbcluster.cc ha_ndbcluster_binlog.cc \
sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \ sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \
@ -161,9 +161,6 @@ lex_hash.h: gen_lex_hash$(EXEEXT)
./gen_lex_hash$(EXEEXT) > $@ ./gen_lex_hash$(EXEEXT) > $@
# the following three should eventually be moved out of this directory # the following three should eventually be moved out of this directory
ha_berkeley.o: ha_berkeley.cc ha_berkeley.h
$(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $<
ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $< $(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<

View File

@ -118,6 +118,11 @@ public:
*/ */
virtual String *val_str(String*,String *)=0; virtual String *val_str(String*,String *)=0;
String *val_int_as_str(String *val_buffer, my_bool unsigned_flag); String *val_int_as_str(String *val_buffer, my_bool unsigned_flag);
/*
str_needs_quotes() returns TRUE if the value returned by val_str() needs
to be quoted when used in constructing an SQL query.
*/
virtual bool str_needs_quotes() { return FALSE; }
virtual Item_result result_type () const=0; virtual Item_result result_type () const=0;
virtual Item_result cmp_type () const { return result_type(); } virtual Item_result cmp_type () const { return result_type(); }
virtual Item_result cast_to_int_type () const { return result_type(); } virtual Item_result cast_to_int_type () const { return result_type(); }
@ -417,6 +422,7 @@ public:
uint32 max_length() { return field_length; } uint32 max_length() { return field_length; }
friend class create_field; friend class create_field;
my_decimal *val_decimal(my_decimal *); my_decimal *val_decimal(my_decimal *);
virtual bool str_needs_quotes() { return TRUE; }
uint is_equal(create_field *new_field); uint is_equal(create_field *new_field);
}; };
@ -1385,6 +1391,7 @@ public:
double val_real(void); double val_real(void);
longlong val_int(void); longlong val_int(void);
String *val_str(String*, String *); String *val_str(String*, String *);
virtual bool str_needs_quotes() { return TRUE; }
my_decimal *val_decimal(my_decimal *); my_decimal *val_decimal(my_decimal *);
int cmp(const char *a, const char *b) int cmp(const char *a, const char *b)
{ return cmp_binary(a, b); } { return cmp_binary(a, b); }
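
The comment added in this hunk documents the new Field::str_needs_quotes() contract: string-typed field classes in the later hunks override it to return TRUE, while numeric fields keep the default FALSE. As a rough illustration of how a caller is meant to use it when assembling SQL text, here is a minimal C++ sketch with made-up stand-in types (FieldStub and append_value are not server code, and value escaping is omitted):

// Hypothetical sketch of the conditional-quoting pattern; FieldStub stands in
// for the server's Field hierarchy and no value escaping is performed.
#include <cstdio>
#include <string>

struct FieldStub {
  bool is_string;       // assumption: true for string/blob columns, false for numeric ones
  std::string text;     // value as it would come back from val_str()
  bool str_needs_quotes() const { return is_string; }
};

// Append one column value, quoting only when the field asks for it.
static void append_value(std::string &query, const FieldStub &f) {
  const bool needs_quote = f.str_needs_quotes();
  if (needs_quote) query += '\'';
  query += f.text;      // a real handler would also escape the value here
  if (needs_quote) query += '\'';
}

int main() {
  std::string q = "INSERT INTO t1 VALUES (";
  append_value(q, FieldStub{false, "42"});   // numeric column  -> 42
  q += ", ";
  append_value(q, FieldStub{true, "abc"});   // string column   -> 'abc'
  q += ")";
  std::printf("%s\n", q.c_str());            // INSERT INTO t1 VALUES (42, 'abc')
  return 0;
}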

File diff suppressed because it is too large

View File

@ -1,180 +0,0 @@
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifdef USE_PRAGMA_INTERFACE
#pragma interface /* gcc class implementation */
#endif
/* class for the berkeley handler */
#include <db.h>
#define BDB_HIDDEN_PRIMARY_KEY_LENGTH 5
typedef struct st_berkeley_share {
ulonglong auto_ident;
ha_rows rows, org_rows;
ulong *rec_per_key;
THR_LOCK lock;
pthread_mutex_t mutex;
char *table_name;
DB *status_block, *file, **key_file;
u_int32_t *key_type;
uint table_name_length,use_count;
uint status,version;
uint ref_length;
bool fixed_length_primary_key, fixed_length_row;
} BDB_SHARE;
class ha_berkeley: public handler
{
THR_LOCK_DATA lock;
DBT last_key,current_row;
gptr alloc_ptr;
byte *rec_buff;
char *key_buff, *key_buff2, *primary_key_buff;
DB *file, **key_file;
DB_TXN *transaction;
u_int32_t *key_type;
DBC *cursor;
BDB_SHARE *share;
ulong int_table_flags;
ulong alloced_rec_buff_length;
ulong changed_rows;
uint primary_key,last_dup_key, hidden_primary_key, version;
bool key_read, using_ignore;
bool fix_rec_buff_for_blob(ulong length);
byte current_ident[BDB_HIDDEN_PRIMARY_KEY_LENGTH];
ulong max_row_length(const byte *buf);
int pack_row(DBT *row,const byte *record, bool new_row);
void unpack_row(char *record, DBT *row);
void unpack_key(char *record, DBT *key, uint index);
DBT *create_key(DBT *key, uint keynr, char *buff, const byte *record,
int key_length = MAX_KEY_LENGTH);
DBT *pack_key(DBT *key, uint keynr, char *buff, const byte *key_ptr,
uint key_length);
int remove_key(DB_TXN *trans, uint keynr, const byte *record, DBT *prim_key);
int remove_keys(DB_TXN *trans,const byte *record, DBT *new_record,
DBT *prim_key, key_map *keys);
int restore_keys(DB_TXN *trans, key_map *changed_keys, uint primary_key,
const byte *old_row, DBT *old_key,
const byte *new_row, DBT *new_key);
int key_cmp(uint keynr, const byte * old_row, const byte * new_row);
int update_primary_key(DB_TXN *trans, bool primary_key_changed,
const byte * old_row, DBT *old_key,
const byte * new_row, DBT *prim_key,
bool local_using_ignore);
int read_row(int error, char *buf, uint keynr, DBT *row, DBT *key, bool);
DBT *get_pos(DBT *to, byte *pos);
public:
ha_berkeley(TABLE_SHARE *table_arg);
~ha_berkeley() {}
const char *table_type() const { return "BerkeleyDB"; }
ulong index_flags(uint idx, uint part, bool all_parts) const;
const char *index_type(uint key_number) { return "BTREE"; }
const char **bas_ext() const;
ulonglong table_flags(void) const { return int_table_flags; }
uint max_supported_keys() const { return MAX_KEY-1; }
uint extra_rec_buf_length() const { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; }
ha_rows estimate_rows_upper_bound();
uint max_supported_key_length() const { return UINT_MAX32; }
uint max_supported_key_part_length() const { return UINT_MAX32; }
const key_map *keys_to_use_for_scanning() { return &key_map_full; }
int open(const char *name, int mode, uint test_if_locked);
int close(void);
double scan_time();
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
int delete_row(const byte * buf);
int index_init(uint index, bool sorted);
int index_end();
int index_read(byte * buf, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
int index_read_idx(byte * buf, uint index, const byte * key,
uint key_len, enum ha_rkey_function find_flag);
int index_read_last(byte * buf, const byte * key, uint key_len);
int index_next(byte * buf);
int index_next_same(byte * buf, const byte *key, uint keylen);
int index_prev(byte * buf);
int index_first(byte * buf);
int index_last(byte * buf);
int rnd_init(bool scan);
int rnd_end();
int rnd_next(byte *buf);
int rnd_pos(byte * buf, byte *pos);
void position(const byte *record);
void info(uint);
int extra(enum ha_extra_function operation);
int reset(void);
int external_lock(THD *thd, int lock_type);
int start_stmt(THD *thd, thr_lock_type lock_type);
void position(byte *record);
int analyze(THD* thd,HA_CHECK_OPT* check_opt);
int optimize(THD* thd, HA_CHECK_OPT* check_opt);
int check(THD* thd, HA_CHECK_OPT* check_opt);
ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
int create(const char *name, register TABLE *form,
HA_CREATE_INFO *create_info);
int delete_table(const char *name);
int rename_table(const char* from, const char* to);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
void get_status();
inline void get_auto_primary_key(byte *to)
{
pthread_mutex_lock(&share->mutex);
share->auto_ident++;
int5store(to,share->auto_ident);
pthread_mutex_unlock(&share->mutex);
}
virtual void get_auto_increment(ulonglong offset, ulonglong increment,
ulonglong nb_desired_values,
ulonglong *first_value,
ulonglong *nb_reserved_values);
void print_error(int error, myf errflag);
uint8 table_cache_type() { return HA_CACHE_TBL_TRANSACT; }
bool primary_key_is_clustered() { return true; }
int cmp_ref(const byte *ref1, const byte *ref2);
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
};
extern const u_int32_t bdb_DB_TXN_NOSYNC;
extern const u_int32_t bdb_DB_RECOVER;
extern const u_int32_t bdb_DB_PRIVATE;
extern const u_int32_t bdb_DB_DIRECT_DB;
extern const u_int32_t bdb_DB_DIRECT_LOG;
extern bool berkeley_shared_data;
extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type,
berkeley_lock_types[];
extern ulong berkeley_max_lock, berkeley_log_buffer_size;
extern ulonglong berkeley_cache_size;
extern ulong berkeley_region_size, berkeley_cache_parts;
extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
extern long berkeley_lock_scan_time;
extern TYPELIB berkeley_lock_typelib;
int berkeley_init(void);
int berkeley_end(ha_panic_function type);
bool berkeley_flush_logs(void);
bool berkeley_show_status(THD *thd, stat_print_fn *print, enum ha_stat_type);

View File

@ -1142,7 +1142,7 @@ bool ha_federated::create_where_from_key(String *to,
Field *field= key_part->field; Field *field= key_part->field;
uint store_length= key_part->store_length; uint store_length= key_part->store_length;
uint part_length= min(store_length, length); uint part_length= min(store_length, length);
needs_quotes= 1; needs_quotes= field->str_needs_quotes();
DBUG_DUMP("key, start of loop", (char *) ptr, length); DBUG_DUMP("key, start of loop", (char *) ptr, length);
if (key_part->null_bit) if (key_part->null_bit)
@ -1663,23 +1663,22 @@ int ha_federated::write_row(byte *buf)
{ {
commas_added= TRUE; commas_added= TRUE;
if ((*field)->is_null()) if ((*field)->is_null())
insert_field_value_string.append(STRING_WITH_LEN(" NULL ")); values_string.append(STRING_WITH_LEN(" NULL "));
else else
{ {
bool needs_quote= (*field)->str_needs_quotes();
(*field)->val_str(&insert_field_value_string); (*field)->val_str(&insert_field_value_string);
values_string.append('\''); if (needs_quote)
values_string.append('\'');
insert_field_value_string.print(&values_string); insert_field_value_string.print(&values_string);
values_string.append('\''); if (needs_quote)
values_string.append('\'');
insert_field_value_string.length(0); insert_field_value_string.length(0);
} }
/* append the field name */ /* append the field name */
insert_string.append((*field)->field_name); insert_string.append((*field)->field_name);
/* append the value */
values_string.append(insert_field_value_string);
insert_field_value_string.length(0);
/* append commas between both fields and fieldnames */ /* append commas between both fields and fieldnames */
/* /*
unfortunately, we can't use the logic if *(fields + 1) to unfortunately, we can't use the logic if *(fields + 1) to
@ -1884,12 +1883,15 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
update_string.append(STRING_WITH_LEN(" NULL ")); update_string.append(STRING_WITH_LEN(" NULL "));
else else
{ {
my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
/* otherwise = */ /* otherwise = */
my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
bool needs_quote= (*field)->str_needs_quotes();
(*field)->val_str(&field_value); (*field)->val_str(&field_value);
update_string.append('\''); if (needs_quote)
update_string.append('\'');
field_value.print(&update_string); field_value.print(&update_string);
update_string.append('\''); if (needs_quote)
update_string.append('\'');
field_value.length(0); field_value.length(0);
tmp_restore_column_map(table->read_set, old_map); tmp_restore_column_map(table->read_set, old_map);
} }
@ -1903,12 +1905,15 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
where_string.append(STRING_WITH_LEN(" IS NULL ")); where_string.append(STRING_WITH_LEN(" IS NULL "));
else else
{ {
bool needs_quote= (*field)->str_needs_quotes();
where_string.append(STRING_WITH_LEN(" = ")); where_string.append(STRING_WITH_LEN(" = "));
(*field)->val_str(&field_value, (*field)->val_str(&field_value,
(char*) (old_data + (*field)->offset())); (char*) (old_data + (*field)->offset()));
where_string.append('\''); if (needs_quote)
where_string.append('\'');
field_value.print(&where_string); field_value.print(&where_string);
where_string.append('\''); if (needs_quote)
where_string.append('\'');
field_value.length(0); field_value.length(0);
} }
where_string.append(STRING_WITH_LEN(" AND ")); where_string.append(STRING_WITH_LEN(" AND "));
@ -1983,11 +1988,14 @@ int ha_federated::delete_row(const byte *buf)
} }
else else
{ {
delete_string.append(STRING_WITH_LEN(" = ")); bool needs_quote= cur_field->str_needs_quotes();
cur_field->val_str(&data_string); delete_string.append(STRING_WITH_LEN(" = "));
delete_string.append('\''); cur_field->val_str(&data_string);
data_string.print(&delete_string); if (needs_quote)
delete_string.append('\''); delete_string.append('\'');
data_string.print(&delete_string);
if (needs_quote)
delete_string.append('\'');
} }
delete_string.append(STRING_WITH_LEN(" AND ")); delete_string.append(STRING_WITH_LEN(" AND "));
} }
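
The federated handler hunks above apply that per-field decision wherever the handler builds SQL for the remote server (create_where_from_key, write_row, update_row and delete_row): values from fields whose str_needs_quotes() returns TRUE keep their surrounding single quotes, while numeric values are now emitted bare instead of being quoted unconditionally. For a hypothetical table with an integer column id and a varchar column name, the generated statement changes roughly like this (made-up values, for illustration only):

    before:  DELETE FROM t1 WHERE id = '42' AND name = 'abc'
    after:   DELETE FROM t1 WHERE id = 42 AND name = 'abc'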

View File

@ -74,7 +74,6 @@ static const LEX_STRING sys_table_aliases[]=
{ {
{(char*)STRING_WITH_LEN("INNOBASE")}, {(char*)STRING_WITH_LEN("INNODB")}, {(char*)STRING_WITH_LEN("INNOBASE")}, {(char*)STRING_WITH_LEN("INNODB")},
{(char*)STRING_WITH_LEN("NDB")}, {(char*)STRING_WITH_LEN("NDBCLUSTER")}, {(char*)STRING_WITH_LEN("NDB")}, {(char*)STRING_WITH_LEN("NDBCLUSTER")},
{(char*)STRING_WITH_LEN("BDB")}, {(char*)STRING_WITH_LEN("BERKELEYDB")},
{(char*)STRING_WITH_LEN("HEAP")}, {(char*)STRING_WITH_LEN("MEMORY")}, {(char*)STRING_WITH_LEN("HEAP")}, {(char*)STRING_WITH_LEN("MEMORY")},
{(char*)STRING_WITH_LEN("MERGE")}, {(char*)STRING_WITH_LEN("MRG_MYISAM")}, {(char*)STRING_WITH_LEN("MERGE")}, {(char*)STRING_WITH_LEN("MRG_MYISAM")},
{NullS, 0} {NullS, 0}
@ -1508,7 +1507,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
/* /*
Read first row (only) from a table Read first row (only) from a table
This is never called for InnoDB or BDB tables, as these table types This is never called for InnoDB tables, as these table types
has the HA_STATS_RECORDS_IS_EXACT set. has the HA_STATS_RECORDS_IS_EXACT set.
*/ */

View File

@ -82,10 +82,8 @@ static SYMBOL symbols[] = {
{ "AVG", SYM(AVG_SYM)}, { "AVG", SYM(AVG_SYM)},
{ "AVG_ROW_LENGTH", SYM(AVG_ROW_LENGTH)}, { "AVG_ROW_LENGTH", SYM(AVG_ROW_LENGTH)},
{ "BACKUP", SYM(BACKUP_SYM)}, { "BACKUP", SYM(BACKUP_SYM)},
{ "BDB", SYM(BERKELEY_DB_SYM)},
{ "BEFORE", SYM(BEFORE_SYM)}, { "BEFORE", SYM(BEFORE_SYM)},
{ "BEGIN", SYM(BEGIN_SYM)}, { "BEGIN", SYM(BEGIN_SYM)},
{ "BERKELEYDB", SYM(BERKELEY_DB_SYM)},
{ "BETWEEN", SYM(BETWEEN_SYM)}, { "BETWEEN", SYM(BETWEEN_SYM)},
{ "BIGINT", SYM(BIGINT)}, { "BIGINT", SYM(BIGINT)},
{ "BINARY", SYM(BINARY)}, { "BINARY", SYM(BINARY)},

View File

@ -1620,12 +1620,6 @@ extern handlerton innobase_hton;
#else #else
extern SHOW_COMP_OPTION have_innodb; extern SHOW_COMP_OPTION have_innodb;
#endif #endif
#ifdef WITH_BERKELEY_STORAGE_ENGINE
extern handlerton berkeley_hton;
#define have_berkeley_db berkeley_hton.state
#else
extern SHOW_COMP_OPTION have_berkeley_db;
#endif
#ifdef WITH_EXAMPLE_STORAGE_ENGINE #ifdef WITH_EXAMPLE_STORAGE_ENGINE
extern handlerton example_hton; extern handlerton example_hton;
#define have_example_db example_hton.state #define have_example_db example_hton.state
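
The removed block follows the same pattern the remaining engines still use above: when an engine is compiled in, have_<engine> is a macro aliasing the state field of that engine's handlerton; otherwise it is a plain SHOW_COMP_OPTION variable that a later hunk ("Instantiate have_xyx for missing storage engines") defines as SHOW_OPTION_NO. A condensed, hypothetical rendering of the idea, not the server's actual declarations:

// Hypothetical condensed form of the have_<engine> pattern shown above.
enum SHOW_COMP_OPTION { SHOW_OPTION_NO, SHOW_OPTION_YES, SHOW_OPTION_DISABLED };

struct handlerton { SHOW_COMP_OPTION state; /* ... */ };

#ifdef WITH_EXAMPLE_STORAGE_ENGINE
extern handlerton example_hton;
#define have_example_db example_hton.state   // engine built in: alias the hton state
#else
extern SHOW_COMP_OPTION have_example_db;     // engine absent: plain flag,
                                             // defined elsewhere as SHOW_OPTION_NO
#endif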

View File

@ -310,7 +310,7 @@ static bool lower_case_table_names_used= 0;
static bool volatile select_thread_in_use, signal_thread_in_use; static bool volatile select_thread_in_use, signal_thread_in_use;
static bool volatile ready_to_exit; static bool volatile ready_to_exit;
static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0; static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0;
static my_bool opt_bdb, opt_isam, opt_ndbcluster, opt_merge; static my_bool opt_isam, opt_ndbcluster, opt_merge;
static my_bool opt_short_log_format= 0; static my_bool opt_short_log_format= 0;
static uint kill_cached_threads, wake_thread; static uint kill_cached_threads, wake_thread;
static ulong killed_threads, thread_created; static ulong killed_threads, thread_created;
@ -332,10 +332,6 @@ static I_List<THD> thread_cache;
static pthread_cond_t COND_thread_cache, COND_flush_thread_cache; static pthread_cond_t COND_thread_cache, COND_flush_thread_cache;
#ifdef WITH_BERKELEY_STORAGE_ENGINE
static my_bool opt_sync_bdb_logs;
#endif
/* Global variables */ /* Global variables */
bool opt_update_log, opt_bin_log; bool opt_update_log, opt_bin_log;
@ -405,22 +401,6 @@ extern ulong srv_commit_concurrency;
extern ulong srv_flush_log_at_trx_commit; extern ulong srv_flush_log_at_trx_commit;
} }
#endif #endif
#ifdef WITH_BERKELEY_STORAGE_ENGINE
#ifndef HAVE_U_INT32_T
typedef unsigned int u_int32_t;
#endif
extern const u_int32_t bdb_DB_TXN_NOSYNC, bdb_DB_RECOVER, bdb_DB_PRIVATE,
bdb_DB_DIRECT_DB, bdb_DB_DIRECT_LOG;
extern bool berkeley_shared_data;
extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type,
berkeley_lock_types[];
extern ulong berkeley_max_lock, berkeley_log_buffer_size;
extern ulonglong berkeley_cache_size;
extern ulong berkeley_region_size, berkeley_cache_parts;
extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
extern long berkeley_lock_scan_time;
extern TYPELIB berkeley_lock_typelib;
#endif
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
const char *opt_ndbcluster_connectstring= 0; const char *opt_ndbcluster_connectstring= 0;
@ -3355,11 +3335,7 @@ server.");
static void create_maintenance_thread() static void create_maintenance_thread()
{ {
if ( if (flush_time && flush_time != ~(ulong) 0L)
#ifdef WITH_BERKELEY_STORAGE_ENGINE
(have_berkeley_db == SHOW_OPTION_YES) ||
#endif
(flush_time && flush_time != ~(ulong) 0L))
{ {
pthread_t hThread; pthread_t hThread;
if (pthread_create(&hThread,&connection_attrib,handle_manager,0)) if (pthread_create(&hThread,&connection_attrib,handle_manager,0))
@ -4901,38 +4877,6 @@ struct my_option my_long_options[] =
"Path to installation directory. All paths are usually resolved relative to this.", "Path to installation directory. All paths are usually resolved relative to this.",
(gptr*) &mysql_home_ptr, (gptr*) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG, (gptr*) &mysql_home_ptr, (gptr*) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG,
0, 0, 0, 0, 0, 0}, 0, 0, 0, 0, 0, 0},
{"bdb", OPT_BDB, "Enable Berkeley DB (if this version of MySQL supports it). \
Disable with --skip-bdb (will save memory).",
(gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, OPT_BDB_DEFAULT, 0, 0,
0, 0, 0},
#ifdef WITH_BERKELEY_STORAGE_ENGINE
{"bdb-data-direct", OPT_BDB_DATA_DIRECT,
"Turn off system buffering of BDB database files to avoid double caching.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-home", OPT_BDB_HOME, "Berkeley home directory.", (gptr*) &berkeley_home,
(gptr*) &berkeley_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-lock-detect", OPT_BDB_LOCK,
"Berkeley lock detect (DEFAULT, OLDEST, RANDOM or YOUNGEST, # sec).",
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-log-direct", OPT_BDB_LOG_DIRECT,
"Turn off system buffering of BDB log files to avoid double caching.",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-logdir", OPT_BDB_LOG, "Berkeley DB log file directory.",
(gptr*) &berkeley_logdir, (gptr*) &berkeley_logdir, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-no-recover", OPT_BDB_NO_RECOVER,
"Don't try to recover Berkeley DB tables on start.", 0, 0, 0, GET_NO_ARG,
NO_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-no-sync", OPT_BDB_NOSYNC,
"This option is deprecated, use --skip-sync-bdb-logs instead",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"bdb-shared-data", OPT_BDB_SHARED,
"Start Berkeley DB in multi-process mode.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0,
0, 0, 0, 0, 0},
{"bdb-tmpdir", OPT_BDB_TMP, "Berkeley DB tempfile name.",
(gptr*) &berkeley_tmpdir, (gptr*) &berkeley_tmpdir, 0, GET_STR,
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
{"big-tables", OPT_BIG_TABLES, {"big-tables", OPT_BIG_TABLES,
"Allow big result sets by saving all temporary sets on file (Solves most 'table full' errors).", "Allow big result sets by saving all temporary sets on file (Solves most 'table full' errors).",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
@ -5747,31 +5691,6 @@ log and this option does nothing anymore.",
"The number of outstanding connection requests MySQL can have. This comes into play when the main MySQL thread gets very many connection requests in a very short time.", "The number of outstanding connection requests MySQL can have. This comes into play when the main MySQL thread gets very many connection requests in a very short time.",
(gptr*) &back_log, (gptr*) &back_log, 0, GET_ULONG, (gptr*) &back_log, (gptr*) &back_log, 0, GET_ULONG,
REQUIRED_ARG, 50, 1, 65535, 0, 1, 0 }, REQUIRED_ARG, 50, 1, 65535, 0, 1, 0 },
#ifdef WITH_BERKELEY_STORAGE_ENGINE
{ "bdb_cache_parts", OPT_BDB_CACHE_PARTS,
"Number of parts to use for BDB cache.",
(gptr*) &berkeley_cache_parts, (gptr*) &berkeley_cache_parts, 0, GET_ULONG,
REQUIRED_ARG, 1, 1, 1024, 0, 1, 0},
{ "bdb_cache_size", OPT_BDB_CACHE_SIZE,
"The buffer that is allocated to cache index and rows for BDB tables.",
(gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULL,
REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, (ulonglong) ~0, 0, IO_SIZE, 0},
{"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.",
(gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
{"bdb_log_buffer_size", OPT_BDB_LOG_BUFFER_SIZE,
"The buffer that is allocated to cache index and rows for BDB tables.",
(gptr*) &berkeley_log_buffer_size, (gptr*) &berkeley_log_buffer_size, 0,
GET_ULONG, REQUIRED_ARG, 0, 256*1024L, ~0L, 0, 1024, 0},
{"bdb_max_lock", OPT_BDB_MAX_LOCK,
"The maximum number of locks you can have active on a BDB table.",
(gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
{"bdb_region_size", OPT_BDB_REGION_SIZE,
"The size of the underlying logging area of the Berkeley DB environment.",
(gptr*) &berkeley_region_size, (gptr*) &berkeley_region_size, 0, GET_ULONG,
OPT_ARG, 60*1024L, 60*1024L, (long) ~0, 0, 1, 0},
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
{"binlog_cache_size", OPT_BINLOG_CACHE_SIZE, {"binlog_cache_size", OPT_BINLOG_CACHE_SIZE,
"The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.", "The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.",
(gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG, (gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG,
@ -6263,12 +6182,6 @@ The minimum value for this variable is 4096.",
(gptr*) &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG, (gptr*) &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG,
MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, ~0L, MALLOC_OVERHEAD, MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, ~0L, MALLOC_OVERHEAD,
1, 0}, 1, 0},
#ifdef WITH_BERKELEY_STORAGE_ENGINE
{"sync-bdb-logs", OPT_BDB_SYNC,
"Synchronously flush Berkeley DB logs. Enabled by default",
(gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL,
NO_ARG, 1, 0, 0, 0, 0, 0},
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
{"sync-binlog", OPT_SYNC_BINLOG, {"sync-binlog", OPT_SYNC_BINLOG,
"Synchronously flush binary log to disk after every #th event. " "Synchronously flush binary log to disk after every #th event. "
"Use 0 (default) to disable synchronous flushing.", "Use 0 (default) to disable synchronous flushing.",
@ -7583,59 +7496,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
have_merge_db= SHOW_OPTION_YES; have_merge_db= SHOW_OPTION_YES;
else else
have_merge_db= SHOW_OPTION_DISABLED; have_merge_db= SHOW_OPTION_DISABLED;
#ifdef WITH_BERKELEY_STORAGE_ENGINE
case OPT_BDB_NOSYNC:
/* Deprecated option */
opt_sync_bdb_logs= 0;
/* Fall through */
case OPT_BDB_SYNC:
if (!opt_sync_bdb_logs)
berkeley_env_flags|= bdb_DB_TXN_NOSYNC;
else
berkeley_env_flags&= ~bdb_DB_TXN_NOSYNC;
break;
case OPT_BDB_LOG_DIRECT:
berkeley_env_flags|= bdb_DB_DIRECT_DB;
break;
case OPT_BDB_DATA_DIRECT:
berkeley_env_flags|= bdb_DB_DIRECT_LOG;
break;
case OPT_BDB_NO_RECOVER:
berkeley_init_flags&= ~(bdb_DB_RECOVER);
break;
case OPT_BDB_LOCK:
{
int type;
if ((type=find_type(argument, &berkeley_lock_typelib, 2)) > 0)
berkeley_lock_type=berkeley_lock_types[type-1];
else
{
int err;
char *end;
uint length= strlen(argument);
long value= my_strntol(&my_charset_latin1, argument, length, 10, &end, &err);
if (end == argument+length)
berkeley_lock_scan_time= value;
else
{
fprintf(stderr,"Unknown lock type: %s\n",argument);
exit(1);
}
}
break;
}
case OPT_BDB_SHARED:
berkeley_init_flags&= ~(bdb_DB_PRIVATE);
berkeley_shared_data= 1;
break;
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
case OPT_BDB: case OPT_BDB:
#ifdef WITH_BERKELEY_STORAGE_ENGINE
if (opt_bdb)
have_berkeley_db= SHOW_OPTION_YES;
else
have_berkeley_db= SHOW_OPTION_DISABLED;
#endif
break; break;
case OPT_NDBCLUSTER: case OPT_NDBCLUSTER:
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
@ -7868,10 +7729,6 @@ static void get_options(int argc,char **argv)
#ifndef WITH_ISAM_STORAGE_ENGINE #ifndef WITH_ISAM_STORAGE_ENGINE
if (opt_isam) if (opt_isam)
sql_print_warning("this binary does not contain ISAM storage engine"); sql_print_warning("this binary does not contain ISAM storage engine");
#endif
#ifndef WITH_BERKELEY_STORAGE_ENGINE
if (opt_bdb)
sql_print_warning("this binary does not contain BDB storage engine");
#endif #endif
if ((opt_log_slow_admin_statements || opt_log_queries_not_using_indexes) && if ((opt_log_slow_admin_statements || opt_log_queries_not_using_indexes) &&
!opt_slow_log) !opt_slow_log)
@ -8215,7 +8072,6 @@ void refresh_status(THD *thd)
/***************************************************************************** /*****************************************************************************
Instantiate have_xyx for missing storage engines Instantiate have_xyx for missing storage engines
*****************************************************************************/ *****************************************************************************/
#undef have_berkeley_db
#undef have_innodb #undef have_innodb
#undef have_ndbcluster #undef have_ndbcluster
#undef have_example_db #undef have_example_db
@ -8225,7 +8081,6 @@ void refresh_status(THD *thd)
#undef have_partition_db #undef have_partition_db
#undef have_blackhole_db #undef have_blackhole_db
SHOW_COMP_OPTION have_berkeley_db= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_innodb= SHOW_OPTION_NO; SHOW_COMP_OPTION have_innodb= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_ndbcluster= SHOW_OPTION_NO; SHOW_COMP_OPTION have_ndbcluster= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_example_db= SHOW_OPTION_NO; SHOW_COMP_OPTION have_example_db= SHOW_OPTION_NO;
@ -8235,14 +8090,6 @@ SHOW_COMP_OPTION have_federated_db= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_partition_db= SHOW_OPTION_NO; SHOW_COMP_OPTION have_partition_db= SHOW_OPTION_NO;
SHOW_COMP_OPTION have_blackhole_db= SHOW_OPTION_NO; SHOW_COMP_OPTION have_blackhole_db= SHOW_OPTION_NO;
#ifndef WITH_BERKELEY_STORAGE_ENGINE
bool berkeley_shared_data;
ulong berkeley_max_lock, berkeley_log_buffer_size;
ulonglong berkeley_cache_size;
ulong berkeley_region_size, berkeley_cache_parts;
char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
#endif
#ifndef WITH_INNOBASE_STORAGE_ENGINE #ifndef WITH_INNOBASE_STORAGE_ENGINE
uint innobase_flush_log_at_trx_commit; uint innobase_flush_log_at_trx_commit;
ulong innobase_fast_shutdown; ulong innobase_fast_shutdown;

View File

@ -59,13 +59,6 @@
#include "event_scheduler.h" #include "event_scheduler.h"
/* WITH_BERKELEY_STORAGE_ENGINE */
extern bool berkeley_shared_data;
extern ulong berkeley_max_lock, berkeley_log_buffer_size;
extern ulonglong berkeley_cache_size;
extern ulong berkeley_region_size, berkeley_cache_parts;
extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
/* WITH_INNOBASE_STORAGE_ENGINE */ /* WITH_INNOBASE_STORAGE_ENGINE */
extern uint innobase_flush_log_at_trx_commit; extern uint innobase_flush_log_at_trx_commit;
extern ulong innobase_fast_shutdown; extern ulong innobase_fast_shutdown;
@ -669,7 +662,6 @@ sys_var_thd_time_zone sys_time_zone("time_zone");
/* Read only variables */ /* Read only variables */
sys_var_have_variable sys_have_archive_db("have_archive", &have_archive_db); sys_var_have_variable sys_have_archive_db("have_archive", &have_archive_db);
sys_var_have_variable sys_have_berkeley_db("have_bdb", &have_berkeley_db);
sys_var_have_variable sys_have_blackhole_db("have_blackhole_engine", sys_var_have_variable sys_have_blackhole_db("have_blackhole_engine",
&have_blackhole_db); &have_blackhole_db);
sys_var_have_variable sys_have_compress("have_compress", &have_compress); sys_var_have_variable sys_have_compress("have_compress", &have_compress);
@ -760,15 +752,6 @@ SHOW_VAR init_vars[]= {
{sys_automatic_sp_privileges.name,(char*) &sys_automatic_sp_privileges, SHOW_SYS}, {sys_automatic_sp_privileges.name,(char*) &sys_automatic_sp_privileges, SHOW_SYS},
{"back_log", (char*) &back_log, SHOW_LONG}, {"back_log", (char*) &back_log, SHOW_LONG},
{sys_basedir.name, (char*) &sys_basedir, SHOW_SYS}, {sys_basedir.name, (char*) &sys_basedir, SHOW_SYS},
{"bdb_cache_parts", (char*) &berkeley_cache_parts, SHOW_LONG},
{"bdb_cache_size", (char*) &berkeley_cache_size, SHOW_LONGLONG},
{"bdb_home", (char*) &berkeley_home, SHOW_CHAR_PTR},
{"bdb_log_buffer_size", (char*) &berkeley_log_buffer_size, SHOW_LONG},
{"bdb_logdir", (char*) &berkeley_logdir, SHOW_CHAR_PTR},
{"bdb_max_lock", (char*) &berkeley_max_lock, SHOW_LONG},
{"bdb_region_size", (char*) &berkeley_region_size, SHOW_LONG},
{"bdb_shared_data", (char*) &berkeley_shared_data, SHOW_BOOL},
{"bdb_tmpdir", (char*) &berkeley_tmpdir, SHOW_CHAR_PTR},
{sys_binlog_cache_size.name,(char*) &sys_binlog_cache_size, SHOW_SYS}, {sys_binlog_cache_size.name,(char*) &sys_binlog_cache_size, SHOW_SYS},
{sys_binlog_format.name, (char*) &sys_binlog_format, SHOW_SYS}, {sys_binlog_format.name, (char*) &sys_binlog_format, SHOW_SYS},
{sys_bulk_insert_buff_size.name,(char*) &sys_bulk_insert_buff_size,SHOW_SYS}, {sys_bulk_insert_buff_size.name,(char*) &sys_bulk_insert_buff_size,SHOW_SYS},
@ -813,7 +796,6 @@ SHOW_VAR init_vars[]= {
{sys_var_general_log_path.name, (char*) &sys_var_general_log_path, SHOW_SYS}, {sys_var_general_log_path.name, (char*) &sys_var_general_log_path, SHOW_SYS},
{sys_group_concat_max_len.name, (char*) &sys_group_concat_max_len, SHOW_SYS}, {sys_group_concat_max_len.name, (char*) &sys_group_concat_max_len, SHOW_SYS},
{sys_have_archive_db.name, (char*) &have_archive_db, SHOW_HAVE}, {sys_have_archive_db.name, (char*) &have_archive_db, SHOW_HAVE},
{sys_have_berkeley_db.name, (char*) &have_berkeley_db, SHOW_HAVE},
{sys_have_blackhole_db.name,(char*) &have_blackhole_db, SHOW_HAVE}, {sys_have_blackhole_db.name,(char*) &have_blackhole_db, SHOW_HAVE},
{sys_have_compress.name, (char*) &have_compress, SHOW_HAVE}, {sys_have_compress.name, (char*) &have_compress, SHOW_HAVE},
{sys_have_crypt.name, (char*) &have_crypt, SHOW_HAVE}, {sys_have_crypt.name, (char*) &have_crypt, SHOW_HAVE},

File diff suppressed because it is too large

View File

@ -23,7 +23,6 @@
*/ */
#include "mysql_priv.h" #include "mysql_priv.h"
#include "sql_manager.h"
ulong volatile manager_status; ulong volatile manager_status;
bool volatile manager_thread_in_use; bool volatile manager_thread_in_use;

View File

@ -1,19 +0,0 @@
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#ifdef WITH_BERKELEY_STORAGE_ENGINE
void berkeley_cleanup_log_files(void);
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
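This deleted header was BDB's only hook here: one cleanup routine, declared solely when the engine is compiled in, and the sql_manager.cc hunk above drops the matching #include. A minimal sketch of that guarded-declaration pattern follows, with a hypothetical engine name and macro (not real server symbols), assuming a periodic caller along the lines of the manager thread:

// Hypothetical illustration of the #ifdef-guarded hook pattern the deleted
// header used; WITH_EXAMPLE_STORAGE_ENGINE and example_cleanup_log_files()
// are stand-ins, not symbols from the MySQL source tree.
#include <cstdio>

#ifdef WITH_EXAMPLE_STORAGE_ENGINE
void example_cleanup_log_files(void) {
  std::puts("example engine: purging old log files");
}
#endif

static void manager_periodic_tick(void) {
  // The caller stays engine-agnostic: the call compiles away when the
  // engine, and therefore the macro, is absent from the build.
#ifdef WITH_EXAMPLE_STORAGE_ENGINE
  example_cleanup_log_files();
#endif
}

int main() {
  manager_periodic_tick();
  return 0;
}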

View File

@@ -146,7 +146,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token BEFORE_SYM
%token BEGIN_SYM
%token BENCHMARK_SYM
%token BERKELEY_DB_SYM
%token BIGINT
%token BINARY
%token BINLOG_SYM
@@ -8354,30 +8353,6 @@ show_param:
if (prepare_schema_table(YYTHD, lex, 0, SCH_COLLATIONS))
YYABORT;
}
| BERKELEY_DB_SYM LOGS_SYM
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS;
if (!(lex->create_info.db_type=
ha_resolve_by_legacy_type(YYTHD, DB_TYPE_BERKELEY_DB)))
{
my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "BerkeleyDB");
YYABORT;
}
WARN_DEPRECATED(yythd, "5.2", "SHOW BDB LOGS", "'SHOW ENGINE BDB LOGS'");
}
| LOGS_SYM
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS;
if (!(lex->create_info.db_type=
ha_resolve_by_legacy_type(YYTHD, DB_TYPE_BERKELEY_DB)))
{
my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "BerkeleyDB");
YYABORT;
}
WARN_DEPRECATED(yythd, "5.2", "SHOW LOGS", "'SHOW ENGINE BDB LOGS'");
}
| GRANTS
{
LEX *lex=Lex;
@@ -9408,7 +9383,6 @@ keyword_sp:
| AUTOEXTEND_SIZE_SYM {}
| AVG_ROW_LENGTH {}
| AVG_SYM {}
| BERKELEY_DB_SYM {}
| BINLOG_SYM {}
| BIT_SYM {}
| BOOL_SYM {}

View File

@@ -1,67 +0,0 @@
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/storage/bdb/build_win32
${CMAKE_SOURCE_DIR}/storage/bdb/dbinc
${CMAKE_SOURCE_DIR}/storage/bdb)
# BDB needs a number of source files that are auto-generated by the unix
# configure. So to build BDB, it is necessary to copy these over to the Windows
# bitkeeper tree, or to use a source .tar.gz package which already has these
# files.
ADD_LIBRARY(bdb crypto/aes_method.c btree/bt_compact.c btree/bt_compare.c
btree/bt_conv.c btree/bt_curadj.c btree/bt_cursor.c
btree/bt_delete.c btree/bt_method.c btree/bt_open.c btree/bt_put.c
btree/bt_rec.c btree/bt_reclaim.c btree/bt_recno.c
btree/bt_rsearch.c btree/bt_search.c btree/bt_split.c
btree/bt_stat.c btree/bt_upgrade.c btree/bt_verify.c
btree/btree_auto.c db/crdel_auto.c db/crdel_rec.c crypto/crypto.c
db/db.c db/db_am.c db/db_auto.c common/db_byteorder.c db/db_cam.c
common/db_clock.c db/db_conv.c db/db_dispatch.c db/db_dup.c
common/db_err.c common/db_getlong.c common/db_idspace.c
db/db_iface.c db/db_join.c common/db_log2.c db/db_meta.c
db/db_method.c db/db_open.c db/db_overflow.c db/db_ovfl_vrfy.c
db/db_pr.c db/db_rec.c db/db_reclaim.c db/db_remove.c
db/db_rename.c db/db_ret.c env/db_salloc.c db/db_setid.c
db/db_setlsn.c env/db_shash.c db/db_stati.c db/db_truncate.c
db/db_upg.c db/db_upg_opd.c db/db_vrfy.c db/db_vrfyutil.c
dbm/dbm.c dbreg/dbreg.c dbreg/dbreg_auto.c dbreg/dbreg_rec.c
dbreg/dbreg_stat.c dbreg/dbreg_util.c env/env_failchk.c
env/env_file.c env/env_method.c env/env_open.c env/env_recover.c
env/env_region.c env/env_register.c env/env_stat.c
fileops/fileops_auto.c fileops/fop_basic.c fileops/fop_rec.c
fileops/fop_util.c hash/hash.c hash/hash_auto.c hash/hash_conv.c
hash/hash_dup.c hash/hash_func.c hash/hash_meta.c
hash/hash_method.c hash/hash_open.c hash/hash_page.c
hash/hash_rec.c hash/hash_reclaim.c hash/hash_stat.c
hash/hash_upgrade.c hash/hash_verify.c hmac/hmac.c
hsearch/hsearch.c lock/lock.c lock/lock_deadlock.c
lock/lock_failchk.c lock/lock_id.c lock/lock_list.c
lock/lock_method.c lock/lock_region.c lock/lock_stat.c
lock/lock_timer.c lock/lock_util.c log/log.c log/log_archive.c
log/log_compare.c log/log_debug.c log/log_get.c log/log_method.c
log/log_put.c log/log_stat.c mp/mp_alloc.c mp/mp_bh.c mp/mp_fget.c
mp/mp_fmethod.c mp/mp_fopen.c mp/mp_fput.c mp/mp_fset.c
mp/mp_method.c mp/mp_region.c mp/mp_register.c mp/mp_stat.c
mp/mp_sync.c mp/mp_trickle.c crypto/mersenne/mt19937db.c
mutex/mut_alloc.c mutex/mut_method.c mutex/mut_region.c
mutex/mut_stat.c mutex/mut_tas.c mutex/mut_win32.c
os_win32/os_abs.c os/os_alloc.c os_win32/os_clock.c
os_win32/os_config.c os_win32/os_dir.c os_win32/os_errno.c
os_win32/os_fid.c os_win32/os_flock.c os_win32/os_fsync.c
os_win32/os_handle.c os/os_id.c os_win32/os_map.c os/os_method.c
os/os_oflags.c os_win32/os_open.c os/os_region.c
os_win32/os_rename.c os/os_root.c os/os_rpath.c os_win32/os_rw.c
os_win32/os_seek.c os_win32/os_sleep.c os_win32/os_spin.c
os_win32/os_stat.c os/os_tmpdir.c os_win32/os_truncate.c
os/os_unlink.c qam/qam.c qam/qam_auto.c qam/qam_conv.c
qam/qam_files.c qam/qam_method.c qam/qam_open.c qam/qam_rec.c
qam/qam_stat.c qam/qam_upgrade.c qam/qam_verify.c rep/rep_auto.c
rep/rep_backup.c rep/rep_elect.c rep/rep_log.c rep/rep_method.c
rep/rep_record.c rep/rep_region.c rep/rep_stat.c rep/rep_stub.c
rep/rep_util.c rep/rep_verify.c crypto/rijndael/rijndael-alg-fst.c
crypto/rijndael/rijndael-api-fst.c hmac/sha1.c clib/strcasecmp.c
txn/txn.c txn/txn_auto.c txn/txn_chkpt.c txn/txn_failchk.c
txn/txn_method.c txn/txn_rec.c txn/txn_recover.c txn/txn_region.c
txn/txn_stat.c txn/txn_util.c common/util_log.c common/util_sig.c
xa/xa.c xa/xa_db.c xa/xa_map.c)

View File

@@ -1,102 +0,0 @@
/*-
* $Id: LICENSE,v 12.1 2005/06/16 20:20:10 bostic Exp $
*/
The following is the license that applies to this copy of the Berkeley DB
software. For a license to use the Berkeley DB software under conditions
other than those described here, or to purchase support for this software,
please contact Sleepycat Software by email at info@sleepycat.com, or on
the Web at http://www.sleepycat.com.
=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
/*
* Copyright (c) 1990-2005
* Sleepycat Software. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Redistributions in any form must be accompanied by information on
* how to obtain complete source code for the DB software and any
* accompanying software that uses the DB software. The source code
* must either be included in the distribution or be available for no
* more than the cost of distribution plus a nominal fee, and must be
* freely redistributable under reasonable conditions. For an
* executable file, complete source code means the source code for all
* modules it contains. It does not include source code for modules or
* files that typically accompany the major components of the operating
* system on which the executable file runs.
*
* THIS SOFTWARE IS PROVIDED BY SLEEPYCAT SOFTWARE ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
* NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL SLEEPYCAT SOFTWARE
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright (c) 1995, 1996
* The President and Fellows of Harvard University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY HARVARD AND ITS CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL HARVARD OR ITS CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/

View File

@@ -1,56 +0,0 @@
# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# Adaptor makefile to translate between what automake expects and what
# BDB provides (or vice versa).
srcdir = @srcdir@
top_srcdir = @top_srcdir@
# distdir and top_distdir are set by the calling Makefile
bdb_build = build_unix
files = LICENSE Makefile Makefile.in README CMakeLists.txt
subdirs = btree build_win32 clib common cxx db dbinc \
dbinc_auto db185 db_archive db_checkpoint db_deadlock db_dump \
db_dump185 db_hotbackup db_load db_printlog db_recover db_stat db_upgrade \
db_verify dbm dbreg dist env fileops hash \
hsearch hmac include lock log mp mutex os \
os_win32 qam rep txn xa sequence crypto
@SET_MAKE@
all:
cd $(bdb_build) && $(MAKE) all
clean:
cd $(bdb_build) && $(MAKE) clean
distclean:
cd $(bdb_build) && $(MAKE) distclean
# May want to fix this, and MYSQL/configure, to install things
install dvi check installcheck:
distdir:
for s in $(subdirs); do \
cp -pr $(srcdir)/$$s $(distdir)/$$s; \
done
for f in $(files); do \
test -f $(distdir)/$$f || cp -p $(srcdir)/$$f $(distdir)/$$f; \
done
mkdir $(distdir)/$(bdb_build)
cp -p $(srcdir)/$(bdb_build)/.IGNORE_ME $(distdir)/$(bdb_build)

View File

@@ -1108,7 +1108,7 @@ int ha_tina::rnd_pos(byte * buf, byte *pos)
{
DBUG_ENTER("ha_tina::rnd_pos");
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
current_position= my_get_ptr(pos,ref_length);
current_position= (off_t)my_get_ptr(pos,ref_length);
DBUG_RETURN(find_current_row(buf));
}
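The only change above is an explicit off_t cast when the saved row position comes back out of the reference buffer. A rough, self-contained sketch of that store/retrieve round trip, using plain memcpy rather than the server's my_store_ptr/my_get_ptr helpers, would look like this:

// Illustrative only: pack a file offset into an opaque byte buffer and read
// it back with an explicit cast, mimicking the rnd_pos() round trip above.
// Plain memcpy is used here instead of MySQL's my_store_ptr/my_get_ptr.
#include <cstdio>
#include <cstring>
#include <sys/types.h>   // off_t

static void store_position(unsigned char *ref, off_t pos) {
  std::memcpy(ref, &pos, sizeof(pos));            // the position() side
}

static off_t get_position(const unsigned char *ref) {
  off_t pos;
  std::memcpy(&pos, ref, sizeof(pos));            // the rnd_pos() side
  return (off_t) pos;                             // explicit cast, echoing the patch
}

int main() {
  unsigned char ref[sizeof(off_t)];
  store_position(ref, (off_t) 4096);
  std::printf("restored position: %lld\n", (long long) get_position(ref));
  return 0;
}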

View File

@@ -280,7 +280,7 @@ static int ft_add_word(MYSQL_FTPARSER_PARAM *param,
static int ft_parse_internal(MYSQL_FTPARSER_PARAM *param,
byte *doc, int doc_len)
char *doc, int doc_len)
{
byte *end=doc+doc_len;
MY_FT_PARSER_PARAM *ft_param=param->mysql_ftparam;

View File

@@ -65,7 +65,7 @@ for batch in t/* ; do
done
echo "=====================================" >> var/ft_test.log
$MYSQLD $OPTS --basedir=$BASE --skip-bdb --pid-file=$PID \
$MYSQLD $OPTS --basedir=$BASE --pid-file=$PID \
--language=$ROOT/sql/share/english \
--skip-grant-tables --skip-innodb \
--skip-networking --tmpdir=$DATA >> var/ft_test.log 2>&1 &

View File

@@ -22,7 +22,6 @@
int mi_delete_all_rows(MI_INFO *info)
{
uint i;
char buf[22];
MYISAM_SHARE *share=info->s;
MI_STATE_INFO *state=&share->state;
DBUG_ENTER("mi_delete_all_rows");

View File

@@ -1178,7 +1178,6 @@ static int _mi_read_rnd_mempack_record(MI_INFO*, byte *,my_off_t, my_bool);
my_bool _mi_memmap_file(MI_INFO *info)
{
byte *file_map;
MYISAM_SHARE *share=info->s;
DBUG_ENTER("mi_memmap_file");

View File

@@ -357,14 +357,6 @@ myisam_repair_threads = 1
myisam_recover
# *** BDB Specific options ***
# Use this option if you run a MySQL server with BDB support enabled but
# you do not plan to use it. This will save memory and may speed up some
# things.
skip-bdb
# *** INNODB Specific options ***
# Use this option if you have a MySQL server with InnoDB support enabled

View File

@@ -48,9 +48,6 @@ server-id = 1
# Uncomment the following if you want to log updates
#log-bin=mysql-bin
# Uncomment the following if you are NOT using BDB tables
#skip-bdb
# Uncomment the following if you are using InnoDB tables
#innodb_data_home_dir = @localstatedir@/
#innodb_data_file_path = ibdata1:10M:autoextend

View File

@@ -37,3 +37,10 @@ directory and add the following to the Makefile.am in that directory
Note, it's important to have "-t" at the end of the filename, otherwise the
test won't be executed by 'make test' !
Documentation
-------------
There is Doxygen-generated documentation available at:
https://intranet.mysql.com/~mkindahl/mytap/html/
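To make the "-t" naming rule above concrete, here is a minimal, self-contained test program (hypothetical name basic-t.cc) that emits raw TAP output directly instead of going through the MyTAP helpers documented at the URL above; it only illustrates the naming and the plan/ok output convention:

// Hypothetical basic-t.cc: a self-contained TAP-emitting test.  The harness
// only cares that the built executable name ends in "-t" and that the output
// follows the TAP "1..N" / "ok N" convention; MyTAP's real helpers are not
// used here.
#include <cstdio>

static int tests_run = 0;

static void check(bool pass, const char *what) {
  std::printf("%s %d - %s\n", pass ? "ok" : "not ok", ++tests_run, what);
}

int main() {
  std::printf("1..2\n");              // TAP plan: two tests follow
  check(2 + 2 == 4, "arithmetic still works");
  check(sizeof(char) == 1, "sizeof(char) is 1 by definition");
  return 0;
}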

View File

@@ -1,19 +1,5 @@
#!/usr/bin/perl
# Override _command_line in the standard Perl test harness to prevent
# it from using "perl" to run the test scripts.
package MySQL::Straps;
use base qw(Test::Harness::Straps);
use strict;
sub _command_line {
return $_[1]
}
package main;
use Test::Harness qw(&runtests $verbose);
use File::Find;
@@ -37,9 +23,6 @@ unit - Run unit tests in directory
my $cmd = shift;
# $Test::Harness::Verbose = 1;
# $Test::Harness::Debug = 1;
if (defined $cmd && exists $dispatch{$cmd}) {
$dispatch{$cmd}->(@ARGV);
} else {
@@ -95,14 +78,7 @@ sub run_cmd (@) {
if (@files > 0) {
# Removing the first './' from the file names
foreach (@files) { s!^\./!! }
$ENV{'HARNESS_PERL_SWITCHES'} .= q" -e 'exec @ARGV'";
# Install the strap above instead of the default strap. Since
# we are replacing the straps under the feet of Test::Harness,
# we need to do some basic initializations in the new straps.
$Test::Harness::Strap = MySQL::Straps->new;
$Test::Harness::Strap->{callback} = \&Test::Harness::strap_callback
if defined &Test::Harness::strap_callback;
runtests @files;
}
}

View File

@@ -39,7 +39,6 @@ The options right now are
WITH_INNOBASE_STORAGE_ENGINE        Enable particular storage engines
WITH_PARTITION_STORAGE_ENGINE
WITH_ARCHIVE_STORAGE_ENGINE
WITH_BERKELEY_STORAGE_ENGINE
WITH_BLACKHOLE_STORAGE_ENGINE
WITH_EXAMPLE_STORAGE_ENGINE
WITH_FEDERATED_STORAGE_ENGINE

View File

@@ -24,7 +24,6 @@ try
switch (parts[0])
{
case "WITH_ARCHIVE_STORAGE_ENGINE":
case "WITH_BERKELEY_STORAGE_ENGINE":
case "WITH_BLACKHOLE_STORAGE_ENGINE":
case "WITH_EXAMPLE_STORAGE_ENGINE":
case "WITH_FEDERATED_STORAGE_ENGINE":
@@ -66,8 +65,6 @@ try
configfile.Close();
//ConfigureBDB();
fso = null;
WScript.Echo("done!");
@@ -135,32 +132,3 @@ function GetVersionId(version)
id += build;
return id;
}
function ConfigureBDB()
{
// read in the Unix configure.in file
var dbIncTS = fso.OpenTextFile("..\\bdb\\dbinc\\db.in", ForReading);
var dbIn = dbIncTS.ReadAll();
dbIncTS.Close();
dbIn = dbIn.replace("@DB_VERSION_MAJOR@", "$DB_VERSION_MAJOR");
dbIn = dbIn.replace("@DB_VERSION_MINOR@", "$DB_VERSION_MINOR");
dbIn = dbIn.replace("@DB_VERSION_PATCH@", "$DB_VERSION_PATCH");
dbIn = dbIn.replace("@DB_VERSION_STRING@", "$DB_VERSION_STRING");
dbIn = dbIn.replace("@u_int8_decl@", "typedef unsigned char u_int8_t;");
dbIn = dbIn.replace("@int16_decl@", "typedef short int16_t;");
dbIn = dbIn.replace("@u_int16_decl@", "typedef unsigned short u_int16_t;");
dbIn = dbIn.replace("@int32_decl@", "typedef int int32_t;");
dbIn = dbIn.replace("@u_int32_decl@", "typedef unsigned int u_int32_t;");
dbIn = dbIn.replace("@u_char_decl@", "{\r\n#if !defined(_WINSOCKAPI_)\r\n" +
"typedef unsigned char u_char;");
dbIn = dbIn.replace("@u_short_decl@", "typedef unsigned short u_short;");
dbIn = dbIn.replace("@u_int_decl@", "typedef unsigned int u_int;");
dbIn = dbIn.replace("@u_long_decl@", "typedef unsigned long u_long;");
dbIn = dbIn.replace("@ssize_t_decl@", "#endif\r\n#if defined(_WIN64)\r\n" +
"typedef __int64 ssize_t;\r\n#else\r\n" +
"typedef int ssize_t;\r\n#endif");
}
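The removed ConfigureBDB() above generated a Windows db.h by substituting @TOKEN@ placeholders in bdb/dbinc/db.in; with BDB gone, that step disappears entirely. For readers unfamiliar with the technique, a generic, self-contained sketch of this kind of substitution (plain C++, not the build's actual tooling) is:

// Minimal placeholder substitution, in the spirit of the removed
// ConfigureBDB(): replace every @NAME@ token in a template string.
// Generic illustration only; not part of the MySQL build system.
#include <iostream>
#include <map>
#include <string>

static std::string substitute(std::string text,
                              const std::map<std::string, std::string> &vars) {
  for (const auto &kv : vars) {
    const std::string token = "@" + kv.first + "@";
    for (std::string::size_type pos = text.find(token);
         pos != std::string::npos;
         pos = text.find(token, pos + kv.second.size()))
      text.replace(pos, token.size(), kv.second);
  }
  return text;
}

int main() {
  const std::map<std::string, std::string> vars = {
    {"DB_VERSION_MAJOR", "4"},
    {"u_int32_decl", "typedef unsigned int u_int32_t;"},
  };
  std::cout << substitute("#define DB_VERSION_MAJOR @DB_VERSION_MAJOR@\n"
                          "@u_int32_decl@\n", vars);
  return 0;
}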