Mirror of https://github.com/MariaDB/server.git, synced 2025-07-30 16:24:05 +03:00
Merge zim.(none):/home/brian/mysql/dep-5.1
into zim.(none):/home/brian/mysql/remove-bdb-5.1
@@ -38,7 +38,6 @@ EXTRA_DIST = FINISH.sh \
compile-pentium-debug-max \
compile-pentium-debug-max-no-embedded \
compile-pentium-debug-max-no-ndb \
compile-pentium-debug-no-bdb \
compile-pentium-debug-openssl \
compile-pentium-debug-yassl \
compile-pentium-gcov \
@@ -52,7 +52,6 @@ fi
--with-csv-storage-engine \
--with-example-storage-engine \
--with-federated-storage-engine \
--with-berkeley-db \
--with-innodb \
--with-ssl \
--enable-thread-safe-client \
@@ -1,9 +0,0 @@
#! /bin/sh

path=`dirname $0`
. "$path/SETUP.sh"

extra_flags="$pentium_cflags $debug_cflags"
extra_configs="$pentium_configs $debug_configs --without-berkeley-db $static_link"

. "$path/FINISH.sh"
@@ -37,7 +37,7 @@ gmake -k clean || true
path=`dirname $0`
. "$path/autorun.sh"

CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wimplicit-int -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -DHAVE_purify -DEXTRA_DEBUG -O2" CXX=gcc CXXLD=g++ CXXFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -DHAVE_purify -DEXTRA_DEBUG -O2" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-berkeley-db --with-embedded-server --with-innodb $EXTRA_CONFIG_FLAGS
CFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wimplicit-int -Wparentheses -Wsign-compare -Wwrite-strings -Wunused -DHAVE_purify -DEXTRA_DEBUG -O2" CXX=gcc CXXLD=g++ CXXFLAGS="-g -Wimplicit -Wreturn-type -Wswitch -Wtrigraphs -Wcomment -W -Wchar-subscripts -Wformat -Wparentheses -Wsign-compare -Wwrite-strings -Woverloaded-virtual -Wsign-promo -Wreorder -Wctor-dtor-privacy -Wnon-virtual-dtor -felide-constructors -fno-exceptions -fno-rtti -DHAVE_purify -DEXTRA_DEBUG -O2" ./configure --prefix=/usr/local/mysql --enable-assembler --with-extra-charsets=complex --enable-thread-safe-client --with-embedded-server --with-innodb $EXTRA_CONFIG_FLAGS

gmake -j 4

@@ -131,9 +131,6 @@ ADD_SUBDIRECTORY(client)
IF(WITH_ARCHIVE_STORAGE_ENGINE)
ADD_SUBDIRECTORY(storage/archive)
ENDIF(WITH_ARCHIVE_STORAGE_ENGINE)
IF(WITH_BERKELEY_STORAGE_ENGINE)
ADD_SUBDIRECTORY(storage/bdb)
ENDIF(WITH_BERKELEY_STORAGE_ENGINE)
IF(WITH_BLACKHOLE_STORAGE_ENGINE)
ADD_SUBDIRECTORY(storage/blackhole)
ENDIF(WITH_BLACKHOLE_STORAGE_ENGINE)
@@ -32,7 +32,6 @@ sinclude(config/ac-macros/check_cpu.m4)
sinclude(config/ac-macros/character_sets.m4)
sinclude(config/ac-macros/compiler_flag.m4)
sinclude(config/ac-macros/plugins.m4)
sinclude(config/ac-macros/ha_berkeley.m4)
sinclude(config/ac-macros/ha_ndbcluster.m4)
sinclude(config/ac-macros/large_file.m4)
sinclude(config/ac-macros/misc.m4)
@@ -2142,12 +2141,6 @@ MYSQL_CHECK_SSL
# functions tested above
#--------------------------------------------------------------------

MYSQL_STORAGE_ENGINE(berkeley, berkeley-db, [BerkeleyDB Storage Engine],
[Transactional Tables using BerkeleyDB], [max,max-no-ndb])
MYSQL_PLUGIN_DIRECTORY(berkeley,[storage/bdb])
MYSQL_PLUGIN_STATIC(berkeley, [[\$(bdb_libs_with_path)]])
MYSQL_PLUGIN_ACTIONS(berkeley, [MYSQL_SETUP_BERKELEY_DB])

MYSQL_STORAGE_ENGINE(blackhole,,[Blackhole Storage Engine],
[Basic Write-only Read-never tables], [max,max-no-ndb])
MYSQL_PLUGIN_DIRECTORY(blackhole, [storage/blackhole])
@@ -16,7 +16,6 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
${CMAKE_SOURCE_DIR}/sql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/extra/yassl/include
${CMAKE_SOURCE_DIR}/storage/bdb/build_win32
${CMAKE_SOURCE_DIR}/zlib
)

@@ -84,9 +83,6 @@ ENDIF(WITH_EXAMPLE_STORAGE_ENGINE)
IF(WITH_INNOBASE_STORAGE_ENGINE)
ADD_DEPENDENCIES(mysqlserver innobase)
ENDIF(WITH_INNOBASE_STORAGE_ENGINE)
IF(WITH_BERKELEY_STORAGE_ENGINE)
ADD_DEPENDENCIES(mysqlserver bdb)
ENDIF(WITH_BERKELEY_STORAGE_ENGINE)

ADD_LIBRARY(libmysqld MODULE cmake_dummy.c libmysqld.def)
TARGET_LINK_LIBRARIES(libmysqld wsock32)
@@ -45,7 +45,7 @@ noinst_HEADERS = embedded_priv.h emb_qcache.h

sqlsources = derror.cc field.cc field_conv.cc strfunc.cc filesort.cc \
ha_heap.cc ha_myisam.cc ha_myisammrg.cc \
ha_innodb.cc ha_berkeley.cc ha_federated.cc ha_ndbcluster.cc \
ha_innodb.cc ha_federated.cc ha_ndbcluster.cc \
ha_ndbcluster_binlog.cc ha_partition.cc \
handler.cc sql_handler.cc \
hostname.cc init.cc password.c \
@@ -96,10 +96,6 @@ yassl_inc_libs= $(top_srcdir)/extra/yassl/src/.libs/libyassl.a \
endif

# Storage engine specific compilation options

ha_berkeley.o: ha_berkeley.cc
$(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $<

ha_ndbcluster.o:ha_ndbcluster.cc
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<

@@ -3,7 +3,6 @@
#

-- source include/not_embedded.inc
-- source include/have_bdb.inc
-- source include/have_innodb.inc
-- source include/have_debug.inc

@@ -12,7 +11,7 @@ drop table if exists t1, t2;
--enable_warnings
reset master;

create table t1 (a int) engine=bdb;
create table t1 (a int) engine=innodb;
create table t2 (a int) engine=innodb;
begin;
insert t1 values (5);
@@ -1,4 +0,0 @@
-- require r/have_bdb.require
disable_query_log;
show variables like "have_bdb";
enable_query_log;
@@ -80,7 +80,7 @@ basedir=.
EXTRA_ARG="--language=../sql/share/english/ --character-sets-dir=../sql/share/charsets/"
fi

mysqld_boot=" $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb --skip-ndbcluster --skip-bdb --tmpdir=. $EXTRA_ARG"
mysqld_boot=" $execdir/mysqld --no-defaults --bootstrap --skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb --skip-ndbcluster --tmpdir=. $EXTRA_ARG"
echo "running $mysqld_boot"

if $scriptdir/mysql_create_system_tables test $mdata $hostname | $mysqld_boot
@@ -2122,7 +2122,6 @@ sub install_db ($$) {
mtr_add_arg($args, "--datadir=%s", $data_dir);
mtr_add_arg($args, "--skip-innodb");
mtr_add_arg($args, "--skip-ndbcluster");
mtr_add_arg($args, "--skip-bdb");
mtr_add_arg($args, "--tmpdir=.");

if ( ! $opt_netware )
@@ -2215,7 +2214,6 @@ basedir = $path_my_basedir
server_id = $server_id
skip-stack-trace
skip-innodb
skip-bdb
skip-ndbcluster
EOF
;
@@ -2629,7 +2627,6 @@ sub mysqld_arguments ($$$$$) {
if ( $opt_valgrind_mysqld )
{
mtr_add_arg($args, "%s--skip-safemalloc", $prefix);
mtr_add_arg($args, "%s--skip-bdb", $prefix);
}

my $pidfile;
@@ -536,8 +536,8 @@ while test $# -gt 0; do
--valgrind | --valgrind-all)
find_valgrind;
VALGRIND=$FIND_VALGRIND
EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-safemalloc --skip-bdb"
EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-safemalloc --skip-bdb"
EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-safemalloc"
EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-safemalloc"
SLEEP_TIME_AFTER_RESTART=10
SLEEP_TIME_FOR_DELETE=60
USE_RUNNING_SERVER=0
@ -6,26 +6,26 @@ Table Op Msg_type Msg_text
|
||||
test.t4 backup error Failed copying .frm file (errno: X)
|
||||
test.t4 backup status Operation failed
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
|
||||
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
|
||||
Error 1 Can't create/write to file 'MYSQLTEST_VARDIR/bogus/t4.frm' (Errcode: X)
|
||||
backup table t4 to '../tmp';
|
||||
Table Op Msg_type Msg_text
|
||||
test.t4 backup status OK
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
|
||||
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
|
||||
backup table t4 to '../tmp';
|
||||
Table Op Msg_type Msg_text
|
||||
test.t4 backup error Failed copying .frm file (errno: X)
|
||||
test.t4 backup status Operation failed
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
|
||||
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
|
||||
Error 1 Can't create/write to file 'MYSQLTEST_VARDIR/tmp/t4.frm' (Errcode: X)
|
||||
drop table t4;
|
||||
restore table t4 from '../tmp';
|
||||
Table Op Msg_type Msg_text
|
||||
test.t4 restore status OK
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
|
||||
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
|
||||
select count(*) from t4;
|
||||
count(*)
|
||||
0
|
||||
@ -35,19 +35,19 @@ backup table t1 to '../tmp';
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 backup status OK
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
|
||||
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
|
||||
drop table t1;
|
||||
restore table t1 from '../bogus';
|
||||
Table Op Msg_type Msg_text
|
||||
t1 restore error Failed copying .frm file
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
|
||||
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
|
||||
Error 29 File 'MYSQLTEST_VARDIR/bogus/t1.frm' not found (Errcode: X)
|
||||
restore table t1 from '../tmp';
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 restore status OK
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
|
||||
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
|
||||
select n from t1;
|
||||
n
|
||||
23
|
||||
@ -62,7 +62,7 @@ Table Op Msg_type Msg_text
|
||||
test.t2 backup status OK
|
||||
test.t3 backup status OK
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
|
||||
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
|
||||
drop table t1,t2,t3;
|
||||
restore table t1,t2,t3 from '../tmp';
|
||||
Table Op Msg_type Msg_text
|
||||
@ -70,7 +70,7 @@ test.t1 restore status OK
|
||||
test.t2 restore status OK
|
||||
test.t3 restore status OK
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
|
||||
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
|
||||
select n from t1;
|
||||
n
|
||||
23
|
||||
@ -91,7 +91,7 @@ restore table t1 from '../tmp';
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 restore status OK
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
|
||||
Warning 1541 The syntax 'RESTORE TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
|
||||
rename table t1 to t5;
|
||||
lock tables t5 write;
|
||||
backup table t5 to '../tmp';
|
||||
@ -99,7 +99,7 @@ unlock tables;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t5 backup status OK
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead.
|
||||
Warning 1541 The syntax 'BACKUP TABLE' is deprecated and will be removed in MySQL 5.2. Please use MySQL Administrator (mysqldump, mysql) instead
|
||||
drop table t5;
|
||||
DROP TABLE IF EXISTS `t+1`;
|
||||
CREATE TABLE `t+1` (c1 INT);
|
||||
|
@ -1,11 +0,0 @@
|
||||
drop table if exists t1;
|
||||
create table t1(objid BIGINT not null, tablename varchar(64), oid BIGINT not null, test BIGINT, PRIMARY KEY (objid), UNIQUE(tablename)) engine=BDB;
|
||||
insert into t1 values(1, 't1',4,9);
|
||||
insert into t1 values(2, 'metatable',1,9);
|
||||
insert into t1 values(3, 'metaindex',1,9 );
|
||||
select * from t1;
|
||||
objid tablename oid test
|
||||
1 t1 4 9
|
||||
2 metatable 1 9
|
||||
3 metaindex 1 9
|
||||
alter table t1 drop column test;
|
@ -1,6 +0,0 @@
|
||||
select * from t1;
|
||||
objid tablename oid
|
||||
1 t1 4
|
||||
2 metatable 1
|
||||
3 metaindex 1
|
||||
drop table t1;
|
@ -1,39 +0,0 @@
|
||||
drop table if exists t1;
|
||||
CREATE TABLE t1 (
|
||||
ChargeID int(10) unsigned NOT NULL auto_increment,
|
||||
ServiceID int(10) unsigned DEFAULT '0' NOT NULL,
|
||||
ChargeDate date DEFAULT '0000-00-00' NOT NULL,
|
||||
ChargeAmount decimal(20,2) DEFAULT '0.00' NOT NULL,
|
||||
FedTaxes decimal(20,2) DEFAULT '0.00' NOT NULL,
|
||||
ProvTaxes decimal(20,2) DEFAULT '0.00' NOT NULL,
|
||||
ChargeStatus enum('New','Auth','Unauth','Sale','Denied','Refund')
|
||||
DEFAULT 'New' NOT NULL,
|
||||
ChargeAuthorizationMessage text,
|
||||
ChargeComment text,
|
||||
ChargeTimeStamp varchar(20),
|
||||
PRIMARY KEY (ChargeID),
|
||||
KEY ServiceID (ServiceID),
|
||||
KEY ChargeDate (ChargeDate)
|
||||
) engine=BDB;
|
||||
BEGIN;
|
||||
INSERT INTO t1
|
||||
VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now');
|
||||
COMMIT;
|
||||
BEGIN;
|
||||
UPDATE t1 SET ChargeAuthorizationMessage = 'blablabla' WHERE
|
||||
ChargeID = 1;
|
||||
COMMIT;
|
||||
INSERT INTO t1
|
||||
VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now');
|
||||
select * from t1;
|
||||
ChargeID ServiceID ChargeDate ChargeAmount FedTaxes ProvTaxes ChargeStatus ChargeAuthorizationMessage ChargeComment ChargeTimeStamp
|
||||
1 1 2001-03-01 1.00 1.00 1.00 New blablabla NULL now
|
||||
2 1 2001-03-01 1.00 1.00 1.00 New NULL NULL now
|
||||
drop table t1;
|
||||
create table t1 (a int) engine=bdb;
|
||||
set autocommit=0;
|
||||
insert into t1 values(1);
|
||||
analyze table t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status OK
|
||||
drop table t1;
|
@ -1,31 +0,0 @@
|
||||
drop table if exists t1,t2;
|
||||
create table t1 (id integer, x integer) engine=BDB;
|
||||
create table t2 (id integer, x integer) engine=BDB;
|
||||
insert into t1 values(0, 0);
|
||||
insert into t2 values(0, 0);
|
||||
set autocommit=0;
|
||||
update t1 set x = 1 where id = 0;
|
||||
set autocommit=0;
|
||||
update t2 set x = 1 where id = 0;
|
||||
select x from t1 where id = 0;
|
||||
select x from t2 where id = 0;
|
||||
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
|
||||
commit;
|
||||
x
|
||||
1
|
||||
commit;
|
||||
select * from t1;
|
||||
id x
|
||||
0 1
|
||||
select * from t2;
|
||||
id x
|
||||
0 1
|
||||
commit;
|
||||
select * from t1;
|
||||
id x
|
||||
0 1
|
||||
select * from t2;
|
||||
id x
|
||||
0 1
|
||||
commit;
|
||||
drop table t1,t2;
|
File diff suppressed because it is too large
@ -1,99 +0,0 @@
|
||||
drop table if exists t1, t2, t3;
|
||||
flush status;
|
||||
set autocommit=0;
|
||||
create table t1 (a int not null) engine=bdb;
|
||||
insert into t1 values (1),(2),(3);
|
||||
select * from t1;
|
||||
a
|
||||
1
|
||||
2
|
||||
3
|
||||
show status like "Qcache_queries_in_cache";
|
||||
Variable_name Value
|
||||
Qcache_queries_in_cache 0
|
||||
drop table t1;
|
||||
set autocommit=1;
|
||||
create table t1 (a int not null) engine=bdb;
|
||||
begin;
|
||||
insert into t1 values (1),(2),(3);
|
||||
select * from t1;
|
||||
a
|
||||
1
|
||||
2
|
||||
3
|
||||
show status like "Qcache_queries_in_cache";
|
||||
Variable_name Value
|
||||
Qcache_queries_in_cache 0
|
||||
drop table t1;
|
||||
create table t1 (a int not null) engine=bdb;
|
||||
create table t2 (a int not null) engine=bdb;
|
||||
create table t3 (a int not null) engine=bdb;
|
||||
insert into t1 values (1),(2);
|
||||
insert into t2 values (1),(2);
|
||||
insert into t3 values (1),(2);
|
||||
select * from t1;
|
||||
a
|
||||
1
|
||||
2
|
||||
select * from t2;
|
||||
a
|
||||
1
|
||||
2
|
||||
select * from t3;
|
||||
a
|
||||
1
|
||||
2
|
||||
show status like "Qcache_queries_in_cache";
|
||||
Variable_name Value
|
||||
Qcache_queries_in_cache 3
|
||||
show status like "Qcache_hits";
|
||||
Variable_name Value
|
||||
Qcache_hits 0
|
||||
begin;
|
||||
select * from t1;
|
||||
a
|
||||
1
|
||||
2
|
||||
select * from t2;
|
||||
a
|
||||
1
|
||||
2
|
||||
select * from t3;
|
||||
a
|
||||
1
|
||||
2
|
||||
show status like "Qcache_queries_in_cache";
|
||||
Variable_name Value
|
||||
Qcache_queries_in_cache 3
|
||||
show status like "Qcache_hits";
|
||||
Variable_name Value
|
||||
Qcache_hits 0
|
||||
insert into t1 values (3);
|
||||
insert into t2 values (3);
|
||||
insert into t1 values (4);
|
||||
select * from t1;
|
||||
a
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
select * from t2;
|
||||
a
|
||||
1
|
||||
2
|
||||
3
|
||||
select * from t3;
|
||||
a
|
||||
1
|
||||
2
|
||||
show status like "Qcache_queries_in_cache";
|
||||
Variable_name Value
|
||||
Qcache_queries_in_cache 3
|
||||
show status like "Qcache_hits";
|
||||
Variable_name Value
|
||||
Qcache_hits 0
|
||||
commit;
|
||||
show status like "Qcache_queries_in_cache";
|
||||
Variable_name Value
|
||||
Qcache_queries_in_cache 1
|
||||
drop table if exists t1, t2, t3;
|
@ -1,462 +0,0 @@
|
||||
SET storage_engine=bdb;
|
||||
DROP TABLE IF EXISTS t1, gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
|
||||
CREATE TABLE gis_point (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POINT);
|
||||
CREATE TABLE gis_line (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g LINESTRING);
|
||||
CREATE TABLE gis_polygon (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g POLYGON);
|
||||
CREATE TABLE gis_multi_point (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g MULTIPOINT);
|
||||
CREATE TABLE gis_multi_line (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g MULTILINESTRING);
|
||||
CREATE TABLE gis_multi_polygon (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g MULTIPOLYGON);
|
||||
CREATE TABLE gis_geometrycollection (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g GEOMETRYCOLLECTION);
|
||||
CREATE TABLE gis_geometry (fid INTEGER PRIMARY KEY AUTO_INCREMENT, g GEOMETRY);
|
||||
SHOW CREATE TABLE gis_point;
|
||||
Table Create Table
|
||||
gis_point CREATE TABLE `gis_point` (
|
||||
`fid` int(11) NOT NULL AUTO_INCREMENT,
|
||||
`g` point DEFAULT NULL,
|
||||
PRIMARY KEY (`fid`)
|
||||
) ENGINE=BerkeleyDB DEFAULT CHARSET=latin1
|
||||
SHOW FIELDS FROM gis_point;
|
||||
Field Type Null Key Default Extra
|
||||
fid int(11) NO PRI NULL auto_increment
|
||||
g point YES NULL
|
||||
SHOW FIELDS FROM gis_line;
|
||||
Field Type Null Key Default Extra
|
||||
fid int(11) NO PRI NULL auto_increment
|
||||
g linestring YES NULL
|
||||
SHOW FIELDS FROM gis_polygon;
|
||||
Field Type Null Key Default Extra
|
||||
fid int(11) NO PRI NULL auto_increment
|
||||
g polygon YES NULL
|
||||
SHOW FIELDS FROM gis_multi_point;
|
||||
Field Type Null Key Default Extra
|
||||
fid int(11) NO PRI NULL auto_increment
|
||||
g multipoint YES NULL
|
||||
SHOW FIELDS FROM gis_multi_line;
|
||||
Field Type Null Key Default Extra
|
||||
fid int(11) NO PRI NULL auto_increment
|
||||
g multilinestring YES NULL
|
||||
SHOW FIELDS FROM gis_multi_polygon;
|
||||
Field Type Null Key Default Extra
|
||||
fid int(11) NO PRI NULL auto_increment
|
||||
g multipolygon YES NULL
|
||||
SHOW FIELDS FROM gis_geometrycollection;
|
||||
Field Type Null Key Default Extra
|
||||
fid int(11) NO PRI NULL auto_increment
|
||||
g geometrycollection YES NULL
|
||||
SHOW FIELDS FROM gis_geometry;
|
||||
Field Type Null Key Default Extra
|
||||
fid int(11) NO PRI NULL auto_increment
|
||||
g geometry YES NULL
|
||||
INSERT INTO gis_point VALUES
|
||||
(101, PointFromText('POINT(10 10)')),
|
||||
(102, PointFromText('POINT(20 10)')),
|
||||
(103, PointFromText('POINT(20 20)')),
|
||||
(104, PointFromWKB(AsWKB(PointFromText('POINT(10 20)'))));
|
||||
INSERT INTO gis_line VALUES
|
||||
(105, LineFromText('LINESTRING(0 0,0 10,10 0)')),
|
||||
(106, LineStringFromText('LINESTRING(10 10,20 10,20 20,10 20,10 10)')),
|
||||
(107, LineStringFromWKB(LineString(Point(10, 10), Point(40, 10))));
|
||||
INSERT INTO gis_polygon VALUES
|
||||
(108, PolygonFromText('POLYGON((10 10,20 10,20 20,10 20,10 10))')),
|
||||
(109, PolyFromText('POLYGON((0 0,50 0,50 50,0 50,0 0), (10 10,20 10,20 20,10 20,10 10))')),
|
||||
(110, PolyFromWKB(Polygon(LineString(Point(0, 0), Point(30, 0), Point(30, 30), Point(0, 0)))));
|
||||
INSERT INTO gis_multi_point VALUES
|
||||
(111, MultiPointFromText('MULTIPOINT(0 0,10 10,10 20,20 20)')),
|
||||
(112, MPointFromText('MULTIPOINT(1 1,11 11,11 21,21 21)')),
|
||||
(113, MPointFromWKB(MultiPoint(Point(3, 6), Point(4, 10))));
|
||||
INSERT INTO gis_multi_line VALUES
|
||||
(114, MultiLineStringFromText('MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))')),
|
||||
(115, MLineFromText('MULTILINESTRING((10 48,10 21,10 0))')),
|
||||
(116, MLineFromWKB(MultiLineString(LineString(Point(1, 2), Point(3, 5)), LineString(Point(2, 5), Point(5, 8), Point(21, 7)))));
|
||||
INSERT INTO gis_multi_polygon VALUES
|
||||
(117, MultiPolygonFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
|
||||
(118, MPolyFromText('MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))')),
|
||||
(119, MPolyFromWKB(MultiPolygon(Polygon(LineString(Point(0, 3), Point(3, 3), Point(3, 0), Point(0, 3))))));
|
||||
INSERT INTO gis_geometrycollection VALUES
|
||||
(120, GeomCollFromText('GEOMETRYCOLLECTION(POINT(0 0), LINESTRING(0 0,10 10))')),
|
||||
(121, GeometryFromWKB(GeometryCollection(Point(44, 6), LineString(Point(3, 6), Point(7, 9)))));
|
||||
INSERT into gis_geometry SELECT * FROM gis_point;
|
||||
INSERT into gis_geometry SELECT * FROM gis_line;
|
||||
INSERT into gis_geometry SELECT * FROM gis_polygon;
|
||||
INSERT into gis_geometry SELECT * FROM gis_multi_point;
|
||||
INSERT into gis_geometry SELECT * FROM gis_multi_line;
|
||||
INSERT into gis_geometry SELECT * FROM gis_multi_polygon;
|
||||
INSERT into gis_geometry SELECT * FROM gis_geometrycollection;
|
||||
SELECT fid, AsText(g) FROM gis_point ORDER by fid;
|
||||
fid AsText(g)
|
||||
101 POINT(10 10)
|
||||
102 POINT(20 10)
|
||||
103 POINT(20 20)
|
||||
104 POINT(10 20)
|
||||
SELECT fid, AsText(g) FROM gis_line ORDER by fid;
|
||||
fid AsText(g)
|
||||
105 LINESTRING(0 0,0 10,10 0)
|
||||
106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
|
||||
107 LINESTRING(10 10,40 10)
|
||||
SELECT fid, AsText(g) FROM gis_polygon ORDER by fid;
|
||||
fid AsText(g)
|
||||
108 POLYGON((10 10,20 10,20 20,10 20,10 10))
|
||||
109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
|
||||
110 POLYGON((0 0,30 0,30 30,0 0))
|
||||
SELECT fid, AsText(g) FROM gis_multi_point ORDER by fid;
|
||||
fid AsText(g)
|
||||
111 MULTIPOINT(0 0,10 10,10 20,20 20)
|
||||
112 MULTIPOINT(1 1,11 11,11 21,21 21)
|
||||
113 MULTIPOINT(3 6,4 10)
|
||||
SELECT fid, AsText(g) FROM gis_multi_line ORDER by fid;
|
||||
fid AsText(g)
|
||||
114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
|
||||
115 MULTILINESTRING((10 48,10 21,10 0))
|
||||
116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
|
||||
SELECT fid, AsText(g) FROM gis_multi_polygon ORDER by fid;
|
||||
fid AsText(g)
|
||||
117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
|
||||
118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
|
||||
119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
|
||||
SELECT fid, AsText(g) FROM gis_geometrycollection ORDER by fid;
|
||||
fid AsText(g)
|
||||
120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
|
||||
121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
|
||||
SELECT fid, AsText(g) FROM gis_geometry ORDER by fid;
|
||||
fid AsText(g)
|
||||
101 POINT(10 10)
|
||||
102 POINT(20 10)
|
||||
103 POINT(20 20)
|
||||
104 POINT(10 20)
|
||||
105 LINESTRING(0 0,0 10,10 0)
|
||||
106 LINESTRING(10 10,20 10,20 20,10 20,10 10)
|
||||
107 LINESTRING(10 10,40 10)
|
||||
108 POLYGON((10 10,20 10,20 20,10 20,10 10))
|
||||
109 POLYGON((0 0,50 0,50 50,0 50,0 0),(10 10,20 10,20 20,10 20,10 10))
|
||||
110 POLYGON((0 0,30 0,30 30,0 0))
|
||||
111 MULTIPOINT(0 0,10 10,10 20,20 20)
|
||||
112 MULTIPOINT(1 1,11 11,11 21,21 21)
|
||||
113 MULTIPOINT(3 6,4 10)
|
||||
114 MULTILINESTRING((10 48,10 21,10 0),(16 0,16 23,16 48))
|
||||
115 MULTILINESTRING((10 48,10 21,10 0))
|
||||
116 MULTILINESTRING((1 2,3 5),(2 5,5 8,21 7))
|
||||
117 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
|
||||
118 MULTIPOLYGON(((28 26,28 0,84 0,84 42,28 26),(52 18,66 23,73 9,48 6,52 18)),((59 18,67 18,67 13,59 13,59 18)))
|
||||
119 MULTIPOLYGON(((0 3,3 3,3 0,0 3)))
|
||||
120 GEOMETRYCOLLECTION(POINT(0 0),LINESTRING(0 0,10 10))
|
||||
121 GEOMETRYCOLLECTION(POINT(44 6),LINESTRING(3 6,7 9))
|
||||
SELECT fid, Dimension(g) FROM gis_geometry ORDER by fid;
|
||||
fid Dimension(g)
|
||||
101 0
|
||||
102 0
|
||||
103 0
|
||||
104 0
|
||||
105 1
|
||||
106 1
|
||||
107 1
|
||||
108 2
|
||||
109 2
|
||||
110 2
|
||||
111 0
|
||||
112 0
|
||||
113 0
|
||||
114 1
|
||||
115 1
|
||||
116 1
|
||||
117 2
|
||||
118 2
|
||||
119 2
|
||||
120 1
|
||||
121 1
|
||||
SELECT fid, GeometryType(g) FROM gis_geometry ORDER by fid;
|
||||
fid GeometryType(g)
|
||||
101 POINT
|
||||
102 POINT
|
||||
103 POINT
|
||||
104 POINT
|
||||
105 LINESTRING
|
||||
106 LINESTRING
|
||||
107 LINESTRING
|
||||
108 POLYGON
|
||||
109 POLYGON
|
||||
110 POLYGON
|
||||
111 MULTIPOINT
|
||||
112 MULTIPOINT
|
||||
113 MULTIPOINT
|
||||
114 MULTILINESTRING
|
||||
115 MULTILINESTRING
|
||||
116 MULTILINESTRING
|
||||
117 MULTIPOLYGON
|
||||
118 MULTIPOLYGON
|
||||
119 MULTIPOLYGON
|
||||
120 GEOMETRYCOLLECTION
|
||||
121 GEOMETRYCOLLECTION
|
||||
SELECT fid, IsEmpty(g) FROM gis_geometry ORDER by fid;
|
||||
fid IsEmpty(g)
|
||||
101 0
|
||||
102 0
|
||||
103 0
|
||||
104 0
|
||||
105 0
|
||||
106 0
|
||||
107 0
|
||||
108 0
|
||||
109 0
|
||||
110 0
|
||||
111 0
|
||||
112 0
|
||||
113 0
|
||||
114 0
|
||||
115 0
|
||||
116 0
|
||||
117 0
|
||||
118 0
|
||||
119 0
|
||||
120 0
|
||||
121 0
|
||||
SELECT fid, AsText(Envelope(g)) FROM gis_geometry ORDER by fid;
|
||||
fid AsText(Envelope(g))
|
||||
101 POLYGON((10 10,10 10,10 10,10 10,10 10))
|
||||
102 POLYGON((20 10,20 10,20 10,20 10,20 10))
|
||||
103 POLYGON((20 20,20 20,20 20,20 20,20 20))
|
||||
104 POLYGON((10 20,10 20,10 20,10 20,10 20))
|
||||
105 POLYGON((0 0,10 0,10 10,0 10,0 0))
|
||||
106 POLYGON((10 10,20 10,20 20,10 20,10 10))
|
||||
107 POLYGON((10 10,40 10,40 10,10 10,10 10))
|
||||
108 POLYGON((10 10,20 10,20 20,10 20,10 10))
|
||||
109 POLYGON((0 0,50 0,50 50,0 50,0 0))
|
||||
110 POLYGON((0 0,30 0,30 30,0 30,0 0))
|
||||
111 POLYGON((0 0,20 0,20 20,0 20,0 0))
|
||||
112 POLYGON((1 1,21 1,21 21,1 21,1 1))
|
||||
113 POLYGON((3 6,4 6,4 10,3 10,3 6))
|
||||
114 POLYGON((10 0,16 0,16 48,10 48,10 0))
|
||||
115 POLYGON((10 0,10 0,10 48,10 48,10 0))
|
||||
116 POLYGON((1 2,21 2,21 8,1 8,1 2))
|
||||
117 POLYGON((28 0,84 0,84 42,28 42,28 0))
|
||||
118 POLYGON((28 0,84 0,84 42,28 42,28 0))
|
||||
119 POLYGON((0 0,3 0,3 3,0 3,0 0))
|
||||
120 POLYGON((0 0,10 0,10 10,0 10,0 0))
|
||||
121 POLYGON((3 6,44 6,44 9,3 9,3 6))
|
||||
explain extended select Dimension(g), GeometryType(g), IsEmpty(g), AsText(Envelope(g)) from gis_geometry;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE gis_geometry ALL NULL NULL NULL NULL 21 100.00
|
||||
Warnings:
|
||||
Note 1003 select dimension(`test`.`gis_geometry`.`g`) AS `Dimension(g)`,geometrytype(`test`.`gis_geometry`.`g`) AS `GeometryType(g)`,isempty(`test`.`gis_geometry`.`g`) AS `IsEmpty(g)`,astext(envelope(`test`.`gis_geometry`.`g`)) AS `AsText(Envelope(g))` from `test`.`gis_geometry`
|
||||
SELECT fid, X(g) FROM gis_point ORDER by fid;
|
||||
fid X(g)
|
||||
101 10
|
||||
102 20
|
||||
103 20
|
||||
104 10
|
||||
SELECT fid, Y(g) FROM gis_point ORDER by fid;
|
||||
fid Y(g)
|
||||
101 10
|
||||
102 10
|
||||
103 20
|
||||
104 20
|
||||
explain extended select X(g),Y(g) FROM gis_point;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE gis_point ALL NULL NULL NULL NULL 4 100.00
|
||||
Warnings:
|
||||
Note 1003 select x(`test`.`gis_point`.`g`) AS `X(g)`,y(`test`.`gis_point`.`g`) AS `Y(g)` from `test`.`gis_point`
|
||||
SELECT fid, AsText(StartPoint(g)) FROM gis_line ORDER by fid;
|
||||
fid AsText(StartPoint(g))
|
||||
105 POINT(0 0)
|
||||
106 POINT(10 10)
|
||||
107 POINT(10 10)
|
||||
SELECT fid, AsText(EndPoint(g)) FROM gis_line ORDER by fid;
|
||||
fid AsText(EndPoint(g))
|
||||
105 POINT(10 0)
|
||||
106 POINT(10 10)
|
||||
107 POINT(40 10)
|
||||
SELECT fid, GLength(g) FROM gis_line ORDER by fid;
|
||||
fid GLength(g)
|
||||
105 24.142135623731
|
||||
106 40
|
||||
107 30
|
||||
SELECT fid, NumPoints(g) FROM gis_line ORDER by fid;
|
||||
fid NumPoints(g)
|
||||
105 3
|
||||
106 5
|
||||
107 2
|
||||
SELECT fid, AsText(PointN(g, 2)) FROM gis_line ORDER by fid;
|
||||
fid AsText(PointN(g, 2))
|
||||
105 POINT(0 10)
|
||||
106 POINT(20 10)
|
||||
107 POINT(40 10)
|
||||
SELECT fid, IsClosed(g) FROM gis_line ORDER by fid;
|
||||
fid IsClosed(g)
|
||||
105 0
|
||||
106 1
|
||||
107 0
|
||||
explain extended select AsText(StartPoint(g)),AsText(EndPoint(g)),GLength(g),NumPoints(g),AsText(PointN(g, 2)),IsClosed(g) FROM gis_line;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE gis_line ALL NULL NULL NULL NULL 3 100.00
|
||||
Warnings:
|
||||
Note 1003 select astext(startpoint(`test`.`gis_line`.`g`)) AS `AsText(StartPoint(g))`,astext(endpoint(`test`.`gis_line`.`g`)) AS `AsText(EndPoint(g))`,glength(`test`.`gis_line`.`g`) AS `GLength(g)`,numpoints(`test`.`gis_line`.`g`) AS `NumPoints(g)`,astext(pointn(`test`.`gis_line`.`g`,2)) AS `AsText(PointN(g, 2))`,isclosed(`test`.`gis_line`.`g`) AS `IsClosed(g)` from `test`.`gis_line`
|
||||
SELECT fid, AsText(Centroid(g)) FROM gis_polygon ORDER by fid;
|
||||
fid AsText(Centroid(g))
|
||||
108 POINT(15 15)
|
||||
109 POINT(25.416666666667 25.416666666667)
|
||||
110 POINT(20 10)
|
||||
SELECT fid, Area(g) FROM gis_polygon ORDER by fid;
|
||||
fid Area(g)
|
||||
108 100
|
||||
109 2400
|
||||
110 450
|
||||
SELECT fid, AsText(ExteriorRing(g)) FROM gis_polygon ORDER by fid;
|
||||
fid AsText(ExteriorRing(g))
|
||||
108 LINESTRING(10 10,20 10,20 20,10 20,10 10)
|
||||
109 LINESTRING(0 0,50 0,50 50,0 50,0 0)
|
||||
110 LINESTRING(0 0,30 0,30 30,0 0)
|
||||
SELECT fid, NumInteriorRings(g) FROM gis_polygon ORDER by fid;
|
||||
fid NumInteriorRings(g)
|
||||
108 0
|
||||
109 1
|
||||
110 0
|
||||
SELECT fid, AsText(InteriorRingN(g, 1)) FROM gis_polygon ORDER by fid;
|
||||
fid AsText(InteriorRingN(g, 1))
|
||||
108 NULL
|
||||
109 LINESTRING(10 10,20 10,20 20,10 20,10 10)
|
||||
110 NULL
|
||||
explain extended select AsText(Centroid(g)),Area(g),AsText(ExteriorRing(g)),NumInteriorRings(g),AsText(InteriorRingN(g, 1)) FROM gis_polygon;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE gis_polygon ALL NULL NULL NULL NULL 3 100.00
|
||||
Warnings:
|
||||
Note 1003 select astext(centroid(`test`.`gis_polygon`.`g`)) AS `AsText(Centroid(g))`,area(`test`.`gis_polygon`.`g`) AS `Area(g)`,astext(exteriorring(`test`.`gis_polygon`.`g`)) AS `AsText(ExteriorRing(g))`,numinteriorrings(`test`.`gis_polygon`.`g`) AS `NumInteriorRings(g)`,astext(interiorringn(`test`.`gis_polygon`.`g`,1)) AS `AsText(InteriorRingN(g, 1))` from `test`.`gis_polygon`
|
||||
SELECT fid, IsClosed(g) FROM gis_multi_line ORDER by fid;
|
||||
fid IsClosed(g)
|
||||
114 0
|
||||
115 0
|
||||
116 0
|
||||
SELECT fid, AsText(Centroid(g)) FROM gis_multi_polygon ORDER by fid;
|
||||
fid AsText(Centroid(g))
|
||||
117 POINT(55.588527753042 17.426536064114)
|
||||
118 POINT(55.588527753042 17.426536064114)
|
||||
119 POINT(2 2)
|
||||
SELECT fid, Area(g) FROM gis_multi_polygon ORDER by fid;
|
||||
fid Area(g)
|
||||
117 1684.5
|
||||
118 1684.5
|
||||
119 4.5
|
||||
SELECT fid, NumGeometries(g) from gis_multi_point ORDER by fid;
|
||||
fid NumGeometries(g)
|
||||
111 4
|
||||
112 4
|
||||
113 2
|
||||
SELECT fid, NumGeometries(g) from gis_multi_line ORDER by fid;
|
||||
fid NumGeometries(g)
|
||||
114 2
|
||||
115 1
|
||||
116 2
|
||||
SELECT fid, NumGeometries(g) from gis_multi_polygon ORDER by fid;
|
||||
fid NumGeometries(g)
|
||||
117 2
|
||||
118 2
|
||||
119 1
|
||||
SELECT fid, NumGeometries(g) from gis_geometrycollection ORDER by fid;
|
||||
fid NumGeometries(g)
|
||||
120 2
|
||||
121 2
|
||||
explain extended SELECT fid, NumGeometries(g) from gis_multi_point;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE gis_multi_point ALL NULL NULL NULL NULL 3 100.00
|
||||
Warnings:
|
||||
Note 1003 select `test`.`gis_multi_point`.`fid` AS `fid`,numgeometries(`test`.`gis_multi_point`.`g`) AS `NumGeometries(g)` from `test`.`gis_multi_point`
|
||||
SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point ORDER by fid;
|
||||
fid AsText(GeometryN(g, 2))
|
||||
111 POINT(10 10)
|
||||
112 POINT(11 11)
|
||||
113 POINT(4 10)
|
||||
SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_line ORDER by fid;
|
||||
fid AsText(GeometryN(g, 2))
|
||||
114 LINESTRING(16 0,16 23,16 48)
|
||||
115 NULL
|
||||
116 LINESTRING(2 5,5 8,21 7)
|
||||
SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_polygon ORDER by fid;
|
||||
fid AsText(GeometryN(g, 2))
|
||||
117 POLYGON((59 18,67 18,67 13,59 13,59 18))
|
||||
118 POLYGON((59 18,67 18,67 13,59 13,59 18))
|
||||
119 NULL
|
||||
SELECT fid, AsText(GeometryN(g, 2)) from gis_geometrycollection ORDER by fid;
|
||||
fid AsText(GeometryN(g, 2))
|
||||
120 LINESTRING(0 0,10 10)
|
||||
121 LINESTRING(3 6,7 9)
|
||||
SELECT fid, AsText(GeometryN(g, 1)) from gis_geometrycollection ORDER by fid;
|
||||
fid AsText(GeometryN(g, 1))
|
||||
120 POINT(0 0)
|
||||
121 POINT(44 6)
|
||||
explain extended SELECT fid, AsText(GeometryN(g, 2)) from gis_multi_point;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE gis_multi_point ALL NULL NULL NULL NULL 3 100.00
|
||||
Warnings:
|
||||
Note 1003 select `test`.`gis_multi_point`.`fid` AS `fid`,astext(geometryn(`test`.`gis_multi_point`.`g`,2)) AS `AsText(GeometryN(g, 2))` from `test`.`gis_multi_point`
|
||||
SELECT g1.fid as first, g2.fid as second,
|
||||
Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
|
||||
Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
|
||||
Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
|
||||
FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
|
||||
first second w c o e d t i r
|
||||
120 120 1 1 0 1 0 0 1 0
|
||||
120 121 0 0 0 0 0 0 1 0
|
||||
121 120 0 0 1 0 0 0 1 0
|
||||
121 121 1 1 0 1 0 0 1 0
|
||||
explain extended SELECT g1.fid as first, g2.fid as second,
|
||||
Within(g1.g, g2.g) as w, Contains(g1.g, g2.g) as c, Overlaps(g1.g, g2.g) as o,
|
||||
Equals(g1.g, g2.g) as e, Disjoint(g1.g, g2.g) as d, Touches(g1.g, g2.g) as t,
|
||||
Intersects(g1.g, g2.g) as i, Crosses(g1.g, g2.g) as r
|
||||
FROM gis_geometrycollection g1, gis_geometrycollection g2 ORDER BY first, second;
|
||||
id select_type table type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE g1 ALL NULL NULL NULL NULL 2 100.00 Using temporary; Using filesort
|
||||
1 SIMPLE g2 ALL NULL NULL NULL NULL 2 100.00
|
||||
Warnings:
|
||||
Note 1003 select `test`.`g1`.`fid` AS `first`,`test`.`g2`.`fid` AS `second`,within(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `w`,contains(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `c`,overlaps(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `o`,equals(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `e`,disjoint(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `d`,touches(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `t`,intersects(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `i`,crosses(`test`.`g1`.`g`,`test`.`g2`.`g`) AS `r` from `test`.`gis_geometrycollection` `g1` join `test`.`gis_geometrycollection` `g2` order by `test`.`g1`.`fid`,`test`.`g2`.`fid`
|
||||
DROP TABLE gis_point, gis_line, gis_polygon, gis_multi_point, gis_multi_line, gis_multi_polygon, gis_geometrycollection, gis_geometry;
|
||||
CREATE TABLE t1 (
|
||||
a INTEGER PRIMARY KEY AUTO_INCREMENT,
|
||||
gp point,
|
||||
ln linestring,
|
||||
pg polygon,
|
||||
mp multipoint,
|
||||
mln multilinestring,
|
||||
mpg multipolygon,
|
||||
gc geometrycollection,
|
||||
gm geometry
|
||||
);
|
||||
SHOW FIELDS FROM t1;
|
||||
Field Type Null Key Default Extra
|
||||
a int(11) NO PRI NULL auto_increment
|
||||
gp point YES NULL
|
||||
ln linestring YES NULL
|
||||
pg polygon YES NULL
|
||||
mp multipoint YES NULL
|
||||
mln multilinestring YES NULL
|
||||
mpg multipolygon YES NULL
|
||||
gc geometrycollection YES NULL
|
||||
gm geometry YES NULL
|
||||
ALTER TABLE t1 ADD fid INT;
|
||||
SHOW FIELDS FROM t1;
|
||||
Field Type Null Key Default Extra
|
||||
a int(11) NO PRI NULL auto_increment
|
||||
gp point YES NULL
|
||||
ln linestring YES NULL
|
||||
pg polygon YES NULL
|
||||
mp multipoint YES NULL
|
||||
mln multilinestring YES NULL
|
||||
mpg multipolygon YES NULL
|
||||
gc geometrycollection YES NULL
|
||||
gm geometry YES NULL
|
||||
fid int(11) YES NULL
|
||||
DROP TABLE t1;
|
||||
create table t1 (pk integer primary key auto_increment, a geometry not null);
|
||||
insert into t1 (a) values (GeomFromText('Point(1 2)'));
|
||||
insert into t1 (a) values ('Garbage');
|
||||
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
|
||||
insert IGNORE into t1 (a) values ('Garbage');
|
||||
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
|
||||
drop table t1;
|
||||
create table t1 (pk integer primary key auto_increment, fl geometry);
|
||||
insert into t1 (fl) values (1);
|
||||
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
|
||||
insert into t1 (fl) values (1.11);
|
||||
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
|
||||
insert into t1 (fl) values ("qwerty");
|
||||
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
|
||||
insert into t1 (fl) values (pointfromtext('point(1,1)'));
|
||||
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
|
||||
drop table t1;
|
@ -1,6 +1,6 @@
|
||||
drop table if exists t1, t2;
|
||||
reset master;
|
||||
create table t1 (a int) engine=bdb;
|
||||
create table t1 (a int) engine=innodb;
|
||||
create table t2 (a int) engine=innodb;
|
||||
begin;
|
||||
insert t1 values (5);
|
||||
@ -10,12 +10,12 @@ insert t2 values (5);
|
||||
commit;
|
||||
show binlog events from 102;
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=bdb
|
||||
master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=innodb
|
||||
master-bin.000001 # Query 1 # use `test`; create table t2 (a int) engine=innodb
|
||||
master-bin.000001 # Query 1 # use `test`; BEGIN
|
||||
master-bin.000001 # Table_map 1 # table_id: # (test.t1)
|
||||
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
|
||||
master-bin.000001 # Query 1 # use `test`; COMMIT
|
||||
master-bin.000001 # Xid 1 # COMMIT /* xid= */
|
||||
master-bin.000001 # Query 1 # use `test`; BEGIN
|
||||
master-bin.000001 # Table_map 1 # table_id: # (test.t2)
|
||||
master-bin.000001 # Write_rows 1 # table_id: # flags: STMT_END_F
|
||||
|
@ -12,7 +12,7 @@ master-bin.000001 367 Xid 1 394 COMMIT /* XID */
|
||||
drop table t1;
|
||||
drop table if exists t1, t2;
|
||||
reset master;
|
||||
create table t1 (a int) engine=bdb;
|
||||
create table t1 (a int) engine=innodb;
|
||||
create table t2 (a int) engine=innodb;
|
||||
begin;
|
||||
insert t1 values (5);
|
||||
@ -22,11 +22,11 @@ insert t2 values (5);
|
||||
commit;
|
||||
show binlog events from 102;
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=bdb
|
||||
master-bin.000001 # Query 1 # use `test`; create table t1 (a int) engine=innodb
|
||||
master-bin.000001 # Query 1 # use `test`; create table t2 (a int) engine=innodb
|
||||
master-bin.000001 # Query 1 # use `test`; BEGIN
|
||||
master-bin.000001 # Query 1 # use `test`; insert t1 values (5)
|
||||
master-bin.000001 # Query 1 # use `test`; COMMIT
|
||||
master-bin.000001 # Xid 1 # COMMIT /* xid= */
|
||||
master-bin.000001 # Query 1 # use `test`; BEGIN
|
||||
master-bin.000001 # Query 1 # use `test`; insert t2 values (5)
|
||||
master-bin.000001 # Xid 1 # COMMIT /* xid= */
|
||||
|
@ -479,7 +479,7 @@ drop table t1;
|
||||
create table t1 (
|
||||
c char(10) character set utf8,
|
||||
unique key a (c(1))
|
||||
) engine=bdb;
|
||||
) engine=innodb;
|
||||
insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
|
||||
insert into t1 values ('aa');
|
||||
ERROR 23000: Duplicate entry 'aa' for key 'a'
|
||||
@ -637,7 +637,7 @@ drop table t1;
|
||||
create table t1 (
|
||||
c char(10) character set utf8 collate utf8_bin,
|
||||
unique key a (c(1))
|
||||
) engine=bdb;
|
||||
) engine=innodb;
|
||||
insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
|
||||
insert into t1 values ('aa');
|
||||
ERROR 23000: Duplicate entry 'aa' for key 'a'
|
||||
@ -707,7 +707,7 @@ drop table t1;
|
||||
create table t1 (
|
||||
str varchar(255) character set utf8 not null,
|
||||
key str (str(2))
|
||||
) engine=bdb;
|
||||
) engine=innodb;
|
||||
INSERT INTO t1 VALUES ('str');
|
||||
INSERT INTO t1 VALUES ('str2');
|
||||
select * from t1 where str='str';
|
||||
@ -796,7 +796,7 @@ insert into t1 values(1,'foo'),(2,'foobar');
|
||||
select * from t1 where b like 'foob%';
|
||||
a b
|
||||
2 foobar
|
||||
alter table t1 engine=bdb;
|
||||
alter table t1 engine=innodb;
|
||||
select * from t1 where b like 'foob%';
|
||||
a b
|
||||
2 foobar
|
||||
|
@@ -1,2 +0,0 @@
Variable_name Value
have_bdb YES
@ -43,7 +43,6 @@ character-sets-dir option_value
|
||||
basedir option_value
|
||||
skip-stack-trace option_value
|
||||
skip-innodb option_value
|
||||
skip-bdb option_value
|
||||
skip-ndbcluster option_value
|
||||
nonguarded option_value
|
||||
log-output option_value
|
||||
@ -64,7 +63,6 @@ character-sets-dir option_value
|
||||
basedir option_value
|
||||
skip-stack-trace option_value
|
||||
skip-innodb option_value
|
||||
skip-bdb option_value
|
||||
skip-ndbcluster option_value
|
||||
nonguarded option_value
|
||||
log-output option_value
|
||||
|
@ -22,7 +22,6 @@ basedir VALUE
|
||||
server_id VALUE
|
||||
skip-stack-trace VALUE
|
||||
skip-innodb VALUE
|
||||
skip-bdb VALUE
|
||||
skip-ndbcluster VALUE
|
||||
log-output VALUE
|
||||
SHOW INSTANCE OPTIONS mysqld2;
|
||||
@ -41,7 +40,6 @@ basedir VALUE
|
||||
server_id VALUE
|
||||
skip-stack-trace VALUE
|
||||
skip-innodb VALUE
|
||||
skip-bdb VALUE
|
||||
skip-ndbcluster VALUE
|
||||
nonguarded VALUE
|
||||
log-output VALUE
|
||||
|
@ -1,136 +0,0 @@
|
||||
drop table if exists t1;
|
||||
create table t1 (
|
||||
pk int primary key,
|
||||
key1 int,
|
||||
key2 int,
|
||||
filler char(200),
|
||||
filler2 char(200),
|
||||
index(key1),
|
||||
index(key2)
|
||||
) engine=bdb;
|
||||
select * from t1 where (key1 >= 2 and key1 <= 10) or (pk >= 4 and pk <=8 );
|
||||
pk key1 key2 filler filler2
|
||||
2 2 2 filler-data filler-data-2
|
||||
3 3 3 filler-data filler-data-2
|
||||
9 9 9 filler-data filler-data-2
|
||||
10 10 10 filler-data filler-data-2
|
||||
4 4 4 filler-data filler-data-2
|
||||
5 5 5 filler-data filler-data-2
|
||||
6 6 6 filler-data filler-data-2
|
||||
7 7 7 filler-data filler-data-2
|
||||
8 8 8 filler-data filler-data-2
|
||||
set @maxv=1000;
|
||||
select * from t1 where
|
||||
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
|
||||
or key1=18 or key1=60;
|
||||
pk key1 key2 filler filler2
|
||||
18 18 18 filler-data filler-data-2
|
||||
60 60 60 filler-data filler-data-2
|
||||
1 1 1 filler-data filler-data-2
|
||||
2 2 2 filler-data filler-data-2
|
||||
3 3 3 filler-data filler-data-2
|
||||
4 4 4 filler-data filler-data-2
|
||||
11 11 11 filler-data filler-data-2
|
||||
12 12 12 filler-data filler-data-2
|
||||
13 13 13 filler-data filler-data-2
|
||||
14 14 14 filler-data filler-data-2
|
||||
50 50 50 filler-data filler-data-2
|
||||
51 51 51 filler-data filler-data-2
|
||||
52 52 52 filler-data filler-data-2
|
||||
53 53 53 filler-data filler-data-2
|
||||
54 54 54 filler-data filler-data-2
|
||||
991 991 991 filler-data filler-data-2
|
||||
992 992 992 filler-data filler-data-2
|
||||
993 993 993 filler-data filler-data-2
|
||||
994 994 994 filler-data filler-data-2
|
||||
995 995 995 filler-data filler-data-2
|
||||
996 996 996 filler-data filler-data-2
|
||||
997 997 997 filler-data filler-data-2
|
||||
998 998 998 filler-data filler-data-2
|
||||
999 999 999 filler-data filler-data-2
|
||||
1000 1000 1000 filler-data filler-data-2
|
||||
select * from t1 where
|
||||
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
|
||||
or key1 < 3 or key1 > @maxv-11;
|
||||
pk key1 key2 filler filler2
|
||||
990 990 990 filler-data filler-data-2
|
||||
1 1 1 filler-data filler-data-2
|
||||
2 2 2 filler-data filler-data-2
|
||||
3 3 3 filler-data filler-data-2
|
||||
4 4 4 filler-data filler-data-2
|
||||
11 11 11 filler-data filler-data-2
|
||||
12 12 12 filler-data filler-data-2
|
||||
13 13 13 filler-data filler-data-2
|
||||
14 14 14 filler-data filler-data-2
|
||||
50 50 50 filler-data filler-data-2
|
||||
51 51 51 filler-data filler-data-2
|
||||
52 52 52 filler-data filler-data-2
|
||||
53 53 53 filler-data filler-data-2
|
||||
54 54 54 filler-data filler-data-2
|
||||
991 991 991 filler-data filler-data-2
|
||||
992 992 992 filler-data filler-data-2
|
||||
993 993 993 filler-data filler-data-2
|
||||
994 994 994 filler-data filler-data-2
|
||||
995 995 995 filler-data filler-data-2
|
||||
996 996 996 filler-data filler-data-2
|
||||
997 997 997 filler-data filler-data-2
|
||||
998 998 998 filler-data filler-data-2
|
||||
999 999 999 filler-data filler-data-2
|
||||
1000 1000 1000 filler-data filler-data-2
|
||||
select * from t1 where
|
||||
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
|
||||
or
|
||||
(key1 < 5) or (key1 > 10 and key1 < 15) or (key1 >= 50 and key1 < 55 ) or (key1 > @maxv-10);
|
||||
pk key1 key2 filler filler2
|
||||
1 1 1 filler-data filler-data-2
|
||||
2 2 2 filler-data filler-data-2
|
||||
3 3 3 filler-data filler-data-2
|
||||
4 4 4 filler-data filler-data-2
|
||||
11 11 11 filler-data filler-data-2
|
||||
12 12 12 filler-data filler-data-2
|
||||
13 13 13 filler-data filler-data-2
|
||||
14 14 14 filler-data filler-data-2
|
||||
50 50 50 filler-data filler-data-2
|
||||
51 51 51 filler-data filler-data-2
|
||||
52 52 52 filler-data filler-data-2
|
||||
53 53 53 filler-data filler-data-2
|
||||
54 54 54 filler-data filler-data-2
|
||||
991 991 991 filler-data filler-data-2
|
||||
992 992 992 filler-data filler-data-2
|
||||
993 993 993 filler-data filler-data-2
|
||||
994 994 994 filler-data filler-data-2
|
||||
995 995 995 filler-data filler-data-2
|
||||
996 996 996 filler-data filler-data-2
|
||||
997 997 997 filler-data filler-data-2
|
||||
998 998 998 filler-data filler-data-2
|
||||
999 999 999 filler-data filler-data-2
|
||||
1000 1000 1000 filler-data filler-data-2
|
||||
select * from t1 where
|
||||
(pk > 10 and pk < 15) or (pk >= 50 and pk < 55 )
|
||||
or
|
||||
(key1 < 5) or (key1 > @maxv-10);
|
||||
pk key1 key2 filler filler2
|
||||
1 1 1 filler-data filler-data-2
|
||||
2 2 2 filler-data filler-data-2
|
||||
3 3 3 filler-data filler-data-2
|
||||
4 4 4 filler-data filler-data-2
|
||||
991 991 991 filler-data filler-data-2
|
||||
992 992 992 filler-data filler-data-2
|
||||
993 993 993 filler-data filler-data-2
|
||||
994 994 994 filler-data filler-data-2
|
||||
995 995 995 filler-data filler-data-2
|
||||
996 996 996 filler-data filler-data-2
|
||||
997 997 997 filler-data filler-data-2
|
||||
998 998 998 filler-data filler-data-2
|
||||
999 999 999 filler-data filler-data-2
|
||||
1000 1000 1000 filler-data filler-data-2
|
||||
11 11 11 filler-data filler-data-2
|
||||
12 12 12 filler-data filler-data-2
|
||||
13 13 13 filler-data filler-data-2
|
||||
14 14 14 filler-data filler-data-2
|
||||
50 50 50 filler-data filler-data-2
|
||||
51 51 51 filler-data filler-data-2
|
||||
52 52 52 filler-data filler-data-2
|
||||
53 53 53 filler-data filler-data-2
|
||||
54 54 54 filler-data filler-data-2
|
||||
drop table t1;
|
@ -29,13 +29,13 @@ on (mysql.general_log.command_type = join_test.command_type)
|
||||
drop table join_test;
|
||||
flush logs;
|
||||
lock tables mysql.general_log WRITE;
|
||||
ERROR HY000: You can't write-lock a log table. Only read access is possible.
|
||||
ERROR HY000: You can't write-lock a log table. Only read access is possible
|
||||
lock tables mysql.slow_log WRITE;
|
||||
ERROR HY000: You can't write-lock a log table. Only read access is possible.
|
||||
ERROR HY000: You can't write-lock a log table. Only read access is possible
|
||||
lock tables mysql.general_log READ;
|
||||
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
|
||||
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead
|
||||
lock tables mysql.slow_log READ;
|
||||
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
|
||||
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead
|
||||
lock tables mysql.slow_log READ LOCAL, mysql.general_log READ LOCAL;
|
||||
unlock tables;
|
||||
lock tables mysql.general_log READ LOCAL;
|
||||
@ -161,13 +161,13 @@ TIMESTAMP USER_HOST THREAD_ID 1 Query set global slow_query_log='ON'
|
||||
TIMESTAMP USER_HOST THREAD_ID 1 Query select * from mysql.general_log
|
||||
flush logs;
|
||||
lock tables mysql.general_log WRITE;
|
||||
ERROR HY000: You can't write-lock a log table. Only read access is possible.
|
||||
ERROR HY000: You can't write-lock a log table. Only read access is possible
|
||||
lock tables mysql.slow_log WRITE;
|
||||
ERROR HY000: You can't write-lock a log table. Only read access is possible.
|
||||
ERROR HY000: You can't write-lock a log table. Only read access is possible
|
||||
lock tables mysql.general_log READ;
|
||||
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
|
||||
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead
|
||||
lock tables mysql.slow_log READ;
|
||||
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead.
|
||||
ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead
|
||||
lock tables mysql.slow_log READ LOCAL, mysql.general_log READ LOCAL;
|
||||
unlock tables;
|
||||
set global general_log='OFF';
|
||||
|
@ -492,7 +492,7 @@ create table t2 like t1;
|
||||
insert into t2 select * from t1;
|
||||
delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b;
|
||||
drop table t1,t2;
|
||||
create table t1 ( c char(8) not null ) engine=bdb;
|
||||
create table t1 ( c char(8) not null ) engine=innodb;
|
||||
insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9');
|
||||
insert into t1 values ('A'),('B'),('C'),('D'),('E'),('F');
|
||||
alter table t1 add b char(8) not null;
|
||||
|
File diff suppressed because it is too large
@ -1,186 +0,0 @@
|
||||
drop table if exists t1, t2, t3,t4;
|
||||
create table t1 (
|
||||
pk1 int not NULL,
|
||||
key1 int(11),
|
||||
key2 int(11),
|
||||
PRIMARY KEY (pk1),
|
||||
KEY key1 (key1),
|
||||
KEY key2 (key2)
|
||||
) engine=bdb;
|
||||
insert into t1 values (-5, 1, 1),
|
||||
(-100, 1, 1),
|
||||
(3, 1, 1),
|
||||
(0, 1, 1),
|
||||
(10, 1, 1);
|
||||
explain select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 index_merge key1,key2 key1,key2 5,5 NULL 5 Using sort_union(key1,key2); Using where
|
||||
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
pk1 key1 key2
|
||||
-100 1 1
|
||||
-5 1 1
|
||||
0 1 1
|
||||
3 1 1
|
||||
10 1 1
|
||||
drop table t1;
|
||||
create table t1 (
|
||||
pk1 int unsigned not NULL,
|
||||
key1 int(11),
|
||||
key2 int(11),
|
||||
PRIMARY KEY (pk1),
|
||||
KEY key1 (key1),
|
||||
KEY key2 (key2)
|
||||
) engine=bdb;
|
||||
insert into t1 values (0, 1, 1),
|
||||
(0xFFFFFFFF, 1, 1),
|
||||
(0xFFFFFFFE, 1, 1),
|
||||
(1, 1, 1),
|
||||
(2, 1, 1);
|
||||
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
pk1 key1 key2
|
||||
0 1 1
|
||||
1 1 1
|
||||
2 1 1
|
||||
4294967294 1 1
|
||||
4294967295 1 1
|
||||
drop table t1;
|
||||
create table t1 (
|
||||
pk1 char(4) not NULL,
|
||||
key1 int(11),
|
||||
key2 int(11),
|
||||
PRIMARY KEY (pk1),
|
||||
KEY key1 (key1),
|
||||
KEY key2 (key2)
|
||||
) engine=bdb collate latin2_general_ci;
|
||||
insert into t1 values ('a1', 1, 1),
|
||||
('b2', 1, 1),
|
||||
('A3', 1, 1),
|
||||
('B4', 1, 1);
|
||||
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
pk1 key1 key2
|
||||
a1 1 1
|
||||
A3 1 1
|
||||
b2 1 1
|
||||
B4 1 1
|
||||
drop table t1;
|
||||
create table t1 (
|
||||
pk1 int not NULL,
|
||||
pk2 char(4) not NULL collate latin1_german1_ci,
|
||||
pk3 char(4) not NULL collate latin1_bin,
|
||||
key1 int(11),
|
||||
key2 int(11),
|
||||
PRIMARY KEY (pk1,pk2,pk3),
|
||||
KEY key1 (key1),
|
||||
KEY key2 (key2)
|
||||
) engine=bdb;
|
||||
insert into t1 values
|
||||
(1, 'u', 'u', 1, 1),
|
||||
(1, 'u', char(0xEC), 1, 1),
|
||||
(1, 'u', 'x', 1, 1);
|
||||
insert ignore into t1 select pk1, char(0xEC), pk3, key1, key2 from t1;
|
||||
insert ignore into t1 select pk1, 'x', pk3, key1, key2 from t1 where pk2='u';
|
||||
insert ignore into t1 select 2, pk2, pk3, key1, key2 from t1;
|
||||
select * from t1;
|
||||
pk1 pk2 pk3 key1 key2
|
||||
1 <09> u 1 1
|
||||
1 <09> x 1 1
|
||||
1 <09> <09> 1 1
|
||||
1 u u 1 1
|
||||
1 u x 1 1
|
||||
1 u <09> 1 1
|
||||
1 x u 1 1
|
||||
1 x x 1 1
|
||||
1 x <09> 1 1
|
||||
2 <09> u 1 1
|
||||
2 <09> x 1 1
|
||||
2 <09> <09> 1 1
|
||||
2 u u 1 1
|
||||
2 u x 1 1
|
||||
2 u <09> 1 1
|
||||
2 x u 1 1
|
||||
2 x x 1 1
|
||||
2 x <09> 1 1
|
||||
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
pk1 pk2 pk3 key1 key2
|
||||
1 <09> u 1 1
|
||||
1 <09> x 1 1
|
||||
1 <09> <09> 1 1
|
||||
1 u u 1 1
|
||||
1 u x 1 1
|
||||
1 u <09> 1 1
|
||||
1 x u 1 1
|
||||
1 x x 1 1
|
||||
1 x <09> 1 1
|
||||
2 <09> u 1 1
|
||||
2 <09> x 1 1
|
||||
2 <09> <09> 1 1
|
||||
2 u u 1 1
|
||||
2 u x 1 1
|
||||
2 u <09> 1 1
|
||||
2 x u 1 1
|
||||
2 x x 1 1
|
||||
2 x <09> 1 1
|
||||
alter table t1 drop primary key;
|
||||
select * from t1;
|
||||
pk1 pk2 pk3 key1 key2
|
||||
1 <09> u 1 1
|
||||
1 <09> x 1 1
|
||||
1 <09> <09> 1 1
|
||||
1 u u 1 1
|
||||
1 u x 1 1
|
||||
1 u <09> 1 1
|
||||
1 x u 1 1
|
||||
1 x x 1 1
|
||||
1 x <09> 1 1
|
||||
2 <09> u 1 1
|
||||
2 <09> x 1 1
|
||||
2 <09> <09> 1 1
|
||||
2 u u 1 1
|
||||
2 u x 1 1
|
||||
2 u <09> 1 1
|
||||
2 x u 1 1
|
||||
2 x x 1 1
|
||||
2 x <09> 1 1
|
||||
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
pk1 pk2 pk3 key1 key2
|
||||
1 <09> u 1 1
|
||||
1 <09> x 1 1
|
||||
1 <09> <09> 1 1
|
||||
1 u u 1 1
|
||||
1 u x 1 1
|
||||
1 u <09> 1 1
|
||||
1 x u 1 1
|
||||
1 x x 1 1
|
||||
1 x <09> 1 1
|
||||
2 <09> u 1 1
|
||||
2 <09> x 1 1
|
||||
2 <09> <09> 1 1
|
||||
2 u u 1 1
|
||||
2 u x 1 1
|
||||
2 u <09> 1 1
|
||||
2 x u 1 1
|
||||
2 x x 1 1
|
||||
2 x <09> 1 1
|
||||
drop table t1;
|
||||
create table t1 (
|
||||
pk1 varchar(8) NOT NULL default '',
|
||||
pk2 varchar(4) NOT NULL default '',
|
||||
key1 int(11),
|
||||
key2 int(11),
|
||||
primary key(pk1, pk2),
|
||||
KEY key1 (key1),
|
||||
KEY key2 (key2)
|
||||
) engine=bdb;
|
||||
insert into t1 values ('','empt',2,2),
|
||||
('a','a--a',2,2),
|
||||
('bb','b--b',2,2),
|
||||
('ccc','c--c',2,2),
|
||||
('dddd','d--d',2,2);
|
||||
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
pk1 pk2 key1 key2
|
||||
empt 2 2
|
||||
a a--a 2 2
|
||||
bb b--b 2 2
|
||||
ccc c--c 2 2
|
||||
dddd d--d 2 2
|
||||
drop table t1;
|
@ -692,7 +692,7 @@ drop database mysqltest;
|
||||
show full plugin;
|
||||
show warnings;
|
||||
Level Code Message
|
||||
Warning 1541 The syntax 'SHOW PLUGIN' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW PLUGINS' instead.
|
||||
Warning 1541 The syntax 'SHOW PLUGIN' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW PLUGINS' instead
|
||||
show plugin;
|
||||
show plugins;
|
||||
End of 5.1 tests
|
||||
|
@ -535,7 +535,7 @@ use db_bug7787|
|
||||
CREATE PROCEDURE p1()
|
||||
SHOW INNODB STATUS; |
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'SHOW INNODB STATUS' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW ENGINE INNODB STATUS' instead.
|
||||
Warning 1541 The syntax 'SHOW INNODB STATUS' is deprecated and will be removed in MySQL 5.2. Please use 'SHOW ENGINE INNODB STATUS' instead
|
||||
GRANT EXECUTE ON PROCEDURE p1 TO user_bug7787@localhost|
|
||||
DROP DATABASE db_bug7787|
|
||||
drop user user_bug7787@localhost|
|
||||
|
@ -101,13 +101,13 @@ create table t1 (t2 timestamp(2), t4 timestamp(4), t6 timestamp(6),
|
||||
t8 timestamp(8), t10 timestamp(10), t12 timestamp(12),
|
||||
t14 timestamp(14));
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'TIMESTAMP(2)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
|
||||
Warning 1541 The syntax 'TIMESTAMP(4)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
|
||||
Warning 1541 The syntax 'TIMESTAMP(6)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
|
||||
Warning 1541 The syntax 'TIMESTAMP(8)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
|
||||
Warning 1541 The syntax 'TIMESTAMP(10)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
|
||||
Warning 1541 The syntax 'TIMESTAMP(12)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
|
||||
Warning 1541 The syntax 'TIMESTAMP(14)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead.
|
||||
Warning 1541 The syntax 'TIMESTAMP(2)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
|
||||
Warning 1541 The syntax 'TIMESTAMP(4)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
|
||||
Warning 1541 The syntax 'TIMESTAMP(6)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
|
||||
Warning 1541 The syntax 'TIMESTAMP(8)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
|
||||
Warning 1541 The syntax 'TIMESTAMP(10)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
|
||||
Warning 1541 The syntax 'TIMESTAMP(12)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
|
||||
Warning 1541 The syntax 'TIMESTAMP(14)' is deprecated and will be removed in MySQL 5.2. Please use 'TIMESTAMP' instead
|
||||
insert t1 values (0,0,0,0,0,0,0),
|
||||
("1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59",
|
||||
"1997-12-31 23:47:59", "1997-12-31 23:47:59", "1997-12-31 23:47:59",
|
||||
|
@ -175,7 +175,7 @@ Warning 1266 Using storage engine MyISAM for table 't1'
|
||||
drop table t1;
|
||||
set table_type=MYISAM;
|
||||
Warnings:
|
||||
Warning 1541 The syntax 'table_type' is deprecated and will be removed in MySQL 5.2. Please use 'storage_engine' instead.
|
||||
Warning 1541 The syntax 'table_type' is deprecated and will be removed in MySQL 5.2. Please use 'storage_engine' instead
|
||||
create table t1 (a int);
|
||||
insert into t1 (a) values (1), (2), (3), (4), (5), (6), (7), (8), (9), (10);
|
||||
update t1 set a='abc';
|
||||
|
@ -1,18 +0,0 @@
|
||||
#
|
||||
# Test of problem when shutting down mysqld at once after ALTER TABLE
|
||||
#
|
||||
-- source include/have_bdb.inc
|
||||
--disable_warnings
|
||||
drop table if exists t1;
|
||||
--enable_warnings
|
||||
|
||||
create table t1(objid BIGINT not null, tablename varchar(64), oid BIGINT not null, test BIGINT, PRIMARY KEY (objid), UNIQUE(tablename)) engine=BDB;
|
||||
insert into t1 values(1, 't1',4,9);
|
||||
insert into t1 values(2, 'metatable',1,9);
|
||||
insert into t1 values(3, 'metaindex',1,9 );
|
||||
select * from t1;
|
||||
alter table t1 drop column test;
|
||||
|
||||
# Now we do a reboot and continue with the next test
|
||||
|
||||
# End of 4.1 tests
|
@ -1,2 +0,0 @@
|
||||
--skip-external-locking
|
||||
|
@ -1,10 +0,0 @@
|
||||
#
|
||||
# Note that this test uses tables from the previous test
|
||||
# This is to test that the table t1 survives a reboot of MySQL
|
||||
# The options in the -master.opt file are just there to force the reboot
|
||||
#
|
||||
-- source include/have_bdb.inc
|
||||
select * from t1;
|
||||
drop table t1;
|
||||
|
||||
# End of 4.1 tests
|
@ -1,51 +0,0 @@
|
||||
-- source include/have_bdb.inc
|
||||
|
||||
# test for bug reported by Mark Steele
|
||||
|
||||
--disable_warnings
|
||||
drop table if exists t1;
|
||||
--enable_warnings
|
||||
CREATE TABLE t1 (
|
||||
ChargeID int(10) unsigned NOT NULL auto_increment,
|
||||
ServiceID int(10) unsigned DEFAULT '0' NOT NULL,
|
||||
ChargeDate date DEFAULT '0000-00-00' NOT NULL,
|
||||
ChargeAmount decimal(20,2) DEFAULT '0.00' NOT NULL,
|
||||
FedTaxes decimal(20,2) DEFAULT '0.00' NOT NULL,
|
||||
ProvTaxes decimal(20,2) DEFAULT '0.00' NOT NULL,
|
||||
ChargeStatus enum('New','Auth','Unauth','Sale','Denied','Refund')
|
||||
DEFAULT 'New' NOT NULL,
|
||||
ChargeAuthorizationMessage text,
|
||||
ChargeComment text,
|
||||
ChargeTimeStamp varchar(20),
|
||||
PRIMARY KEY (ChargeID),
|
||||
KEY ServiceID (ServiceID),
|
||||
KEY ChargeDate (ChargeDate)
|
||||
) engine=BDB;
|
||||
|
||||
BEGIN;
|
||||
INSERT INTO t1
|
||||
VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now');
|
||||
COMMIT;
|
||||
|
||||
BEGIN;
|
||||
UPDATE t1 SET ChargeAuthorizationMessage = 'blablabla' WHERE
|
||||
ChargeID = 1;
|
||||
COMMIT;
|
||||
|
||||
INSERT INTO t1
|
||||
VALUES(NULL,1,'2001-03-01',1,1,1,'New',NULL,NULL,'now');
|
||||
select * from t1;
|
||||
drop table t1;
|
||||
|
||||
#
|
||||
# Test for bug #2342 "Running ANALYZE TABLE on bdb table
|
||||
# inside a transaction hangs server thread"
|
||||
|
||||
create table t1 (a int) engine=bdb;
|
||||
|
||||
set autocommit=0;
|
||||
insert into t1 values(1);
|
||||
analyze table t1;
|
||||
drop table t1;
|
||||
|
||||
# End of 4.1 tests
|
@ -1,59 +0,0 @@
|
||||
# This test doesn't work with the embedded version as this code
|
||||
# assumes that one query is running while we are doing queries on
|
||||
# a second connection.
|
||||
# This would work if mysqltest run would be threaded and handle each
|
||||
# connection in a separate thread.
|
||||
#
|
||||
|
||||
-- source include/not_embedded.inc
|
||||
-- source include/have_bdb.inc
|
||||
|
||||
connect (con1,localhost,root,,);
|
||||
connect (con2,localhost,root,,);
|
||||
|
||||
--disable_warnings
|
||||
drop table if exists t1,t2;
|
||||
--enable_warnings
|
||||
connection con1;
|
||||
create table t1 (id integer, x integer) engine=BDB;
|
||||
create table t2 (id integer, x integer) engine=BDB;
|
||||
insert into t1 values(0, 0);
|
||||
insert into t2 values(0, 0);
|
||||
set autocommit=0;
|
||||
update t1 set x = 1 where id = 0;
|
||||
|
||||
connection con2;
|
||||
set autocommit=0;
|
||||
update t2 set x = 1 where id = 0;
|
||||
|
||||
# The following query should hang because con1 is locking the page
|
||||
--send
|
||||
select x from t1 where id = 0;
|
||||
|
||||
connection con1;
|
||||
# This should generate a deadlock as we are trying to access a locked row
|
||||
--send
|
||||
select x from t2 where id = 0;
|
||||
|
||||
connection con2;
|
||||
--error 1213
|
||||
reap;
|
||||
commit;
|
||||
|
||||
connection con1;
|
||||
reap;
|
||||
commit;
|
||||
|
||||
connection con2;
|
||||
select * from t1;
|
||||
select * from t2;
|
||||
commit;
|
||||
|
||||
connection con1;
|
||||
select * from t1;
|
||||
select * from t2;
|
||||
commit;
|
||||
|
||||
drop table t1,t2;
|
||||
|
||||
# End of 4.1 tests
|
@ -1,59 +0,0 @@
|
||||
# This test doesn't work with the embedded version as this code
|
||||
# assumes that one query is running while we are doing queries on
|
||||
# a second connection.
|
||||
# This would work if mysqltest run would be threaded and handle each
|
||||
# connection in a separate thread.
|
||||
#
|
||||
|
||||
#-- source include/not_embedded.inc
|
||||
-- source include/have_bdb.inc
|
||||
|
||||
connect (con1,localhost,root,,);
|
||||
connect (con2,localhost,root,,);
|
||||
|
||||
--disable_warnings
|
||||
drop table if exists t1,t2;
|
||||
--enable_warnings
|
||||
connection con1;
|
||||
create table t1 (id integer, x integer) engine=BDB;
|
||||
create table t2 (id integer, x integer) engine=BDB;
|
||||
insert into t1 values(0, 0);
|
||||
insert into t2 values(0, 0);
|
||||
set autocommit=0;
|
||||
update t1 set x = 1 where id = 0;
|
||||
|
||||
connection con2;
|
||||
set autocommit=0;
|
||||
update t2 set x = 1 where id = 0;
|
||||
|
||||
# The following query should hang because con1 is locking the page
|
||||
--send
|
||||
select x from t1 where id = 0;
|
||||
|
||||
connection con1;
|
||||
# This should generate a deadlock as we are trying to access a locked row
|
||||
--send
|
||||
select x from t2 where id = 0;
|
||||
|
||||
connection con2;
|
||||
--error 1213
|
||||
reap;
|
||||
commit;
|
||||
|
||||
connection con1;
|
||||
reap;
|
||||
commit;
|
||||
|
||||
connection con2;
|
||||
select * from t1;
|
||||
select * from t2;
|
||||
commit;
|
||||
|
||||
connection con1;
|
||||
select * from t1;
|
||||
select * from t2;
|
||||
commit;
|
||||
|
||||
drop table t1,t2;
|
||||
|
||||
# End of 4.1 tests
|
File diff suppressed because it is too large
@ -1 +0,0 @@
|
||||
--set-variable=query_cache_size=1M
|
@ -1,53 +0,0 @@
|
||||
-- source include/have_bdb.inc
|
||||
-- source include/have_query_cache.inc
|
||||
|
||||
#
|
||||
# Without auto_commit.
|
||||
#
|
||||
--disable_warnings
|
||||
drop table if exists t1, t2, t3;
|
||||
--enable_warnings
|
||||
flush status;
|
||||
set autocommit=0;
|
||||
create table t1 (a int not null) engine=bdb;
|
||||
insert into t1 values (1),(2),(3);
|
||||
select * from t1;
|
||||
show status like "Qcache_queries_in_cache";
|
||||
drop table t1;
|
||||
set autocommit=1;
|
||||
create table t1 (a int not null) engine=bdb;
|
||||
begin;
|
||||
insert into t1 values (1),(2),(3);
|
||||
select * from t1;
|
||||
show status like "Qcache_queries_in_cache";
|
||||
drop table t1;
|
||||
create table t1 (a int not null) engine=bdb;
|
||||
create table t2 (a int not null) engine=bdb;
|
||||
create table t3 (a int not null) engine=bdb;
|
||||
insert into t1 values (1),(2);
|
||||
insert into t2 values (1),(2);
|
||||
insert into t3 values (1),(2);
|
||||
select * from t1;
|
||||
select * from t2;
|
||||
select * from t3;
|
||||
show status like "Qcache_queries_in_cache";
|
||||
show status like "Qcache_hits";
|
||||
begin;
|
||||
select * from t1;
|
||||
select * from t2;
|
||||
select * from t3;
|
||||
show status like "Qcache_queries_in_cache";
|
||||
show status like "Qcache_hits";
|
||||
insert into t1 values (3);
|
||||
insert into t2 values (3);
|
||||
insert into t1 values (4);
|
||||
select * from t1;
|
||||
select * from t2;
|
||||
select * from t3;
|
||||
show status like "Qcache_queries_in_cache";
|
||||
show status like "Qcache_hits";
|
||||
commit;
|
||||
show status like "Qcache_queries_in_cache";
|
||||
drop table if exists t1, t2, t3;
|
||||
|
||||
# End of 4.1 tests
|
@ -1,3 +0,0 @@
|
||||
-- source include/have_bdb.inc
|
||||
SET storage_engine=bdb;
|
||||
--source include/gis_generic.inc
|
@ -360,7 +360,7 @@ drop table t1;
|
||||
create table t1 (
|
||||
c char(10) character set utf8,
|
||||
unique key a (c(1))
|
||||
) engine=bdb;
|
||||
) engine=innodb;
|
||||
--enable_warnings
|
||||
insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
|
||||
--error 1062
|
||||
@ -483,7 +483,7 @@ drop table t1;
|
||||
create table t1 (
|
||||
c char(10) character set utf8 collate utf8_bin,
|
||||
unique key a (c(1))
|
||||
) engine=bdb;
|
||||
) engine=innodb;
|
||||
--enable_warnings
|
||||
insert into t1 values ('a'),('b'),('c'),('d'),('e'),('f');
|
||||
--error 1062
|
||||
@ -558,7 +558,7 @@ drop table t1;
|
||||
create table t1 (
|
||||
str varchar(255) character set utf8 not null,
|
||||
key str (str(2))
|
||||
) engine=bdb;
|
||||
) engine=innodb;
|
||||
--enable_warnings
|
||||
INSERT INTO t1 VALUES ('str');
|
||||
INSERT INTO t1 VALUES ('str2');
|
||||
@ -644,7 +644,7 @@ create table t1 (
|
||||
insert into t1 values(1,'foo'),(2,'foobar');
|
||||
select * from t1 where b like 'foob%';
|
||||
--disable_warnings
|
||||
alter table t1 engine=bdb;
|
||||
alter table t1 engine=innodb;
|
||||
--enable_warnings
|
||||
select * from t1 where b like 'foob%';
|
||||
drop table t1;
|
||||
|
@ -44,3 +44,4 @@ rpl_row_basic_7ndb : BUG#21298 2006-07-27 msvensson
|
||||
rpl_truncate_7ndb : BUG#21298 2006-07-27 msvensson
|
||||
crash_commit_before : 2006-08-02 msvensson
|
||||
rpl_ndb_dd_advance : BUG#18679 2006-07-28 jimw (Test fails randomly)
|
||||
federated_transactions : Need to be re-enabled once Patrick's merge is complete
|
||||
|
@ -10,7 +10,7 @@ CREATE TABLE federated.t1 (
|
||||
`id` int(20) NOT NULL,
|
||||
`name` varchar(32) NOT NULL default ''
|
||||
)
|
||||
DEFAULT CHARSET=latin1 ENGINE=InnoDB;
|
||||
DEFAULT CHARSET=latin1 ENGINE=innodb;
|
||||
|
||||
connection master;
|
||||
DROP TABLE IF EXISTS federated.t1;
|
||||
|
@ -1,52 +0,0 @@
|
||||
#
|
||||
# 2-sweeps read Index_merge test
|
||||
#
|
||||
-- source include/have_bdb.inc
|
||||
|
||||
--disable_warnings
|
||||
drop table if exists t1;
|
||||
--enable_warnings
|
||||
|
||||
create table t1 (
|
||||
pk int primary key,
|
||||
key1 int,
|
||||
key2 int,
|
||||
filler char(200),
|
||||
filler2 char(200),
|
||||
index(key1),
|
||||
index(key2)
|
||||
) engine=bdb;
|
||||
|
||||
|
||||
--disable_query_log
|
||||
let $1=1000;
|
||||
while ($1)
|
||||
{
|
||||
eval insert into t1 values($1, $1, $1, 'filler-data','filler-data-2');
|
||||
dec $1;
|
||||
}
|
||||
--enable_query_log
|
||||
|
||||
select * from t1 where (key1 >= 2 and key1 <= 10) or (pk >= 4 and pk <=8 );
|
||||
|
||||
set @maxv=1000;
|
||||
|
||||
select * from t1 where
|
||||
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
|
||||
or key1=18 or key1=60;
|
||||
|
||||
select * from t1 where
|
||||
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
|
||||
or key1 < 3 or key1 > @maxv-11;
|
||||
|
||||
select * from t1 where
|
||||
(pk < 5) or (pk > 10 and pk < 15) or (pk >= 50 and pk < 55 ) or (pk > @maxv-10)
|
||||
or
|
||||
(key1 < 5) or (key1 > 10 and key1 < 15) or (key1 >= 50 and key1 < 55 ) or (key1 > @maxv-10);
|
||||
|
||||
select * from t1 where
|
||||
(pk > 10 and pk < 15) or (pk >= 50 and pk < 55 )
|
||||
or
|
||||
(key1 < 5) or (key1 > @maxv-10);
|
||||
|
||||
drop table t1;
|
@ -485,7 +485,7 @@ delete t1,t2 from t2,t1 where t1.a<'B' and t2.b=t1.b;
|
||||
drop table t1,t2;
|
||||
|
||||
--disable_warnings
|
||||
create table t1 ( c char(8) not null ) engine=bdb;
|
||||
create table t1 ( c char(8) not null ) engine=innodb;
|
||||
--enable_warnings
|
||||
|
||||
insert into t1 values ('0'),('1'),('2'),('3'),('4'),('5'),('6'),('7'),('8'),('9');
|
||||
|
@ -1,25 +0,0 @@
|
||||
###############################################
|
||||
# #
|
||||
# Prepared Statements test on BDB tables #
|
||||
# #
|
||||
###############################################
|
||||
|
||||
#
|
||||
# NOTE: PLEASE SEE ps_1general.test (bottom)
|
||||
# BEFORE ADDING NEW TEST CASES HERE !!!
|
||||
|
||||
use test;
|
||||
|
||||
-- source include/have_bdb.inc
|
||||
let $type= 'BDB' ;
|
||||
-- source include/ps_create.inc
|
||||
-- source include/ps_renew.inc
|
||||
|
||||
-- source include/ps_query.inc
|
||||
-- source include/ps_modify.inc
|
||||
-- source include/ps_modify1.inc
|
||||
-- source include/ps_conv.inc
|
||||
|
||||
drop table t1, t9;
|
||||
|
||||
# End of 4.1 tests
|
@ -1,108 +0,0 @@
|
||||
#
|
||||
# Test for rowid ordering (and comparison) functions.
|
||||
# do index_merge select for tables with PK of various types.
|
||||
#
|
||||
--disable_warnings
|
||||
drop table if exists t1, t2, t3,t4;
|
||||
--enable_warnings
|
||||
|
||||
-- source include/have_bdb.inc
|
||||
|
||||
# Signed number as rowid
|
||||
create table t1 (
|
||||
pk1 int not NULL,
|
||||
key1 int(11),
|
||||
key2 int(11),
|
||||
PRIMARY KEY (pk1),
|
||||
KEY key1 (key1),
|
||||
KEY key2 (key2)
|
||||
) engine=bdb;
|
||||
insert into t1 values (-5, 1, 1),
|
||||
(-100, 1, 1),
|
||||
(3, 1, 1),
|
||||
(0, 1, 1),
|
||||
(10, 1, 1);
|
||||
explain select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
drop table t1;
|
||||
|
||||
# Unsigned numbers as rowids
|
||||
create table t1 (
|
||||
pk1 int unsigned not NULL,
|
||||
key1 int(11),
|
||||
key2 int(11),
|
||||
PRIMARY KEY (pk1),
|
||||
KEY key1 (key1),
|
||||
KEY key2 (key2)
|
||||
) engine=bdb;
|
||||
insert into t1 values (0, 1, 1),
|
||||
(0xFFFFFFFF, 1, 1),
|
||||
(0xFFFFFFFE, 1, 1),
|
||||
(1, 1, 1),
|
||||
(2, 1, 1);
|
||||
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
drop table t1;
|
||||
|
||||
# Case-insensitive char(N)
|
||||
create table t1 (
|
||||
pk1 char(4) not NULL,
|
||||
key1 int(11),
|
||||
key2 int(11),
|
||||
PRIMARY KEY (pk1),
|
||||
KEY key1 (key1),
|
||||
KEY key2 (key2)
|
||||
) engine=bdb collate latin2_general_ci;
|
||||
insert into t1 values ('a1', 1, 1),
|
||||
('b2', 1, 1),
|
||||
('A3', 1, 1),
|
||||
('B4', 1, 1);
|
||||
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
drop table t1;
|
||||
|
||||
# Multi-part PK
|
||||
create table t1 (
|
||||
pk1 int not NULL,
|
||||
pk2 char(4) not NULL collate latin1_german1_ci,
|
||||
pk3 char(4) not NULL collate latin1_bin,
|
||||
key1 int(11),
|
||||
key2 int(11),
|
||||
PRIMARY KEY (pk1,pk2,pk3),
|
||||
KEY key1 (key1),
|
||||
KEY key2 (key2)
|
||||
) engine=bdb;
|
||||
insert into t1 values
|
||||
(1, 'u', 'u', 1, 1),
|
||||
(1, 'u', char(0xEC), 1, 1),
|
||||
(1, 'u', 'x', 1, 1);
|
||||
insert ignore into t1 select pk1, char(0xEC), pk3, key1, key2 from t1;
|
||||
insert ignore into t1 select pk1, 'x', pk3, key1, key2 from t1 where pk2='u';
|
||||
insert ignore into t1 select 2, pk2, pk3, key1, key2 from t1;
|
||||
select * from t1;
|
||||
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
|
||||
# Hidden PK
|
||||
alter table t1 drop primary key;
|
||||
select * from t1;
|
||||
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
drop table t1;
|
||||
|
||||
# Variable-length PK
|
||||
# this is also test for Bug#2688
|
||||
create table t1 (
|
||||
pk1 varchar(8) NOT NULL default '',
|
||||
pk2 varchar(4) NOT NULL default '',
|
||||
key1 int(11),
|
||||
key2 int(11),
|
||||
primary key(pk1, pk2),
|
||||
KEY key1 (key1),
|
||||
KEY key2 (key2)
|
||||
) engine=bdb;
|
||||
insert into t1 values ('','empt',2,2),
|
||||
('a','a--a',2,2),
|
||||
('bb','b--b',2,2),
|
||||
('ccc','c--c',2,2),
|
||||
('dddd','d--d',2,2);
|
||||
select * from t1 force index(key1, key2) where key1 < 3 or key2 < 3;
|
||||
|
||||
drop table t1;
|
||||
|
@ -42,7 +42,7 @@ base64_needed_encoded_length(int length_of_data)
|
||||
int
|
||||
base64_needed_decoded_length(int length_of_encoded_data)
|
||||
{
|
||||
return ceil(length_of_encoded_data * 3 / 4);
|
||||
return (int)ceil(length_of_encoded_data * 3 / 4);
|
||||
}
|
||||
|
||||
|
||||
|
@ -46,7 +46,7 @@ uint my_pread(File Filedes, byte *Buffer, uint Count, my_off_t offset,
|
||||
before seeking to the given offset
|
||||
*/
|
||||
|
||||
error= (old_offset= lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L ||
|
||||
error= (old_offset= (off_t)lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L ||
|
||||
lseek(Filedes, offset, MY_SEEK_SET) == -1L;
|
||||
|
||||
if (!error) /* Seek was successful */
|
||||
@ -121,7 +121,7 @@ uint my_pwrite(int Filedes, const byte *Buffer, uint Count, my_off_t offset,
|
||||
As we cannot change the file pointer, we save the old position,
|
||||
before seeking to the given offset
|
||||
*/
|
||||
error= ((old_offset= lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L ||
|
||||
error= ((old_offset= (off_t)lseek(Filedes, 0L, MY_SEEK_CUR)) == -1L ||
|
||||
lseek(Filedes, offset, MY_SEEK_SET) == -1L);
|
||||
|
||||
if (!error) /* Seek was successful */
|
||||
|
@ -361,7 +361,6 @@ int mysql_install_db(int argc, char *argv[])
|
||||
add_arg(&al, "--bootstrap");
|
||||
add_arg(&al, "--skip-grant-tables");
|
||||
add_arg(&al, "--skip-innodb");
|
||||
add_arg(&al, "--skip-bdb");
|
||||
|
||||
// spawn mysqld
|
||||
err = spawn(mysqld, &al, TRUE, sql_file, out_log, err_log);
|
||||
|
@ -210,7 +210,6 @@ void install_db(char *datadir)
|
||||
add_arg(&al, "--basedir=%s", base_dir);
|
||||
add_arg(&al, "--datadir=%s", datadir);
|
||||
add_arg(&al, "--skip-innodb");
|
||||
add_arg(&al, "--skip-bdb");
|
||||
|
||||
// spawn
|
||||
if ((err = spawn(mysqld_file, &al, TRUE, input, output, error)) != 0)
|
||||
|
@ -212,7 +212,7 @@ then
|
||||
fi
|
||||
mysqld_install_cmd_line="$mysqld $defaults $mysqld_opt --bootstrap \
|
||||
--skip-grant-tables --basedir=$basedir --datadir=$ldata --skip-innodb \
|
||||
--skip-bdb --skip-ndbcluster $args --max_allowed_packet=8M --net_buffer_length=16K"
|
||||
--skip-ndbcluster $args --max_allowed_packet=8M --net_buffer_length=16K"
|
||||
if $scriptdir/mysql_create_system_tables $create_option $mdata $hostname $windows \
|
||||
| eval "$mysqld_install_cmd_line"
|
||||
then
|
||||
|
@ -429,7 +429,7 @@ str_to_datetime(const char *str, uint length, MYSQL_TIME *l_time,
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (check_date(l_time, not_zero_date, flags, was_cut))
|
||||
if ((my_bool)check_date(l_time, not_zero_date, flags, was_cut))
|
||||
goto err;
|
||||
|
||||
l_time->time_type= (number_of_fields <= 3 ?
|
||||
|
@ -8,8 +8,7 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
|
||||
${CMAKE_SOURCE_DIR}/sql
|
||||
${CMAKE_SOURCE_DIR}/regex
|
||||
${CMAKE_SOURCE_DIR}/zlib
|
||||
${CMAKE_SOURCE_DIR}/storage/bdb/build_win32
|
||||
${CMAKE_SOURCE_DIR}/storage/bdb/dbinc)
|
||||
)
|
||||
|
||||
SET_SOURCE_FILES_PROPERTIES(${CMAKE_SOURCE_DIR}/sql/message.rc
|
||||
${CMAKE_SOURCE_DIR}/sql/message.h
|
||||
@ -29,7 +28,7 @@ ADD_DEFINITIONS(-DHAVE_ROW_BASED_REPLICATION -DMYSQL_SERVER
|
||||
ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc
|
||||
discover.cc ../libmysql/errmsg.c field.cc field_conv.cc
|
||||
filesort.cc gstream.cc ha_heap.cc ha_myisam.cc ha_myisammrg.cc
|
||||
ha_innodb.cc ha_partition.cc ha_federated.cc ha_berkeley.cc
|
||||
ha_innodb.cc ha_partition.cc ha_federated.cc
|
||||
handler.cc hash_filo.cc hash_filo.h
|
||||
hostname.cc init.cc item.cc item_buff.cc item_cmpfunc.cc
|
||||
item_create.cc item_func.cc item_geofunc.cc item_row.cc
|
||||
@ -79,9 +78,6 @@ ENDIF(WITH_EXAMPLE_STORAGE_ENGINE)
|
||||
IF(WITH_INNOBASE_STORAGE_ENGINE)
|
||||
TARGET_LINK_LIBRARIES(mysqld innobase)
|
||||
ENDIF(WITH_INNOBASE_STORAGE_ENGINE)
|
||||
IF(WITH_BERKELEY_STORAGE_ENGINE)
|
||||
TARGET_LINK_LIBRARIES(mysqld bdb)
|
||||
ENDIF(WITH_BERKELEY_STORAGE_ENGINE)
|
||||
|
||||
ADD_DEPENDENCIES(mysqld GenError)
|
||||
|
||||
|
@ -47,10 +47,10 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
|
||||
item_create.h item_subselect.h item_row.h \
|
||||
mysql_priv.h item_geofunc.h sql_bitmap.h \
|
||||
procedure.h sql_class.h sql_lex.h sql_list.h \
|
||||
sql_manager.h sql_map.h sql_string.h unireg.h \
|
||||
sql_map.h sql_string.h unireg.h \
|
||||
sql_error.h field.h handler.h mysqld_suffix.h \
|
||||
ha_heap.h ha_myisam.h ha_myisammrg.h ha_partition.h \
|
||||
ha_innodb.h ha_berkeley.h ha_federated.h \
|
||||
ha_innodb.h ha_federated.h \
|
||||
ha_ndbcluster.h ha_ndbcluster_binlog.h \
|
||||
ha_ndbcluster_tables.h \
|
||||
opt_range.h protocol.h rpl_tblmap.h \
|
||||
@ -88,7 +88,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc sql_partition.cc \
|
||||
discover.cc time.cc opt_range.cc opt_sum.cc \
|
||||
records.cc filesort.cc handler.cc \
|
||||
ha_heap.cc ha_myisam.cc ha_myisammrg.cc \
|
||||
ha_partition.cc ha_innodb.cc ha_berkeley.cc \
|
||||
ha_partition.cc ha_innodb.cc \
|
||||
ha_federated.cc \
|
||||
ha_ndbcluster.cc ha_ndbcluster_binlog.cc \
|
||||
sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \
|
||||
@ -161,9 +161,6 @@ lex_hash.h: gen_lex_hash$(EXEEXT)
|
||||
./gen_lex_hash$(EXEEXT) > $@
|
||||
|
||||
# the following three should eventually be moved out of this directory
|
||||
ha_berkeley.o: ha_berkeley.cc ha_berkeley.h
|
||||
$(CXXCOMPILE) @bdb_includes@ $(LM_CFLAGS) -c $<
|
||||
|
||||
ha_ndbcluster.o:ha_ndbcluster.cc ha_ndbcluster.h
|
||||
$(CXXCOMPILE) @ndbcluster_includes@ $(LM_CFLAGS) -c $<
|
||||
|
||||
|
@ -118,6 +118,11 @@ public:
|
||||
*/
|
||||
virtual String *val_str(String*,String *)=0;
|
||||
String *val_int_as_str(String *val_buffer, my_bool unsigned_flag);
|
||||
/*
|
||||
str_needs_quotes() returns TRUE if the value returned by val_str() needs
|
||||
to be quoted when used in constructing an SQL query.
|
||||
*/
|
||||
virtual bool str_needs_quotes() { return FALSE; }
|
||||
virtual Item_result result_type () const=0;
|
||||
virtual Item_result cmp_type () const { return result_type(); }
|
||||
virtual Item_result cast_to_int_type () const { return result_type(); }
|
||||
@ -417,6 +422,7 @@ public:
|
||||
uint32 max_length() { return field_length; }
|
||||
friend class create_field;
|
||||
my_decimal *val_decimal(my_decimal *);
|
||||
virtual bool str_needs_quotes() { return TRUE; }
|
||||
uint is_equal(create_field *new_field);
|
||||
};
|
||||
|
||||
@ -1385,6 +1391,7 @@ public:
|
||||
double val_real(void);
|
||||
longlong val_int(void);
|
||||
String *val_str(String*, String *);
|
||||
virtual bool str_needs_quotes() { return TRUE; }
|
||||
my_decimal *val_decimal(my_decimal *);
|
||||
int cmp(const char *a, const char *b)
|
||||
{ return cmp_binary(a, b); }
|
||||
|
2754 sql/ha_berkeley.cc
File diff suppressed because it is too large
@ -1,180 +0,0 @@
|
||||
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
|
||||
#ifdef USE_PRAGMA_INTERFACE
|
||||
#pragma interface /* gcc class implementation */
|
||||
#endif
|
||||
|
||||
/* class for the Berkeley DB (BDB) handler */
|
||||
|
||||
#include <db.h>
|
||||
|
||||
#define BDB_HIDDEN_PRIMARY_KEY_LENGTH 5
|
||||
|
||||
typedef struct st_berkeley_share {
|
||||
ulonglong auto_ident;
|
||||
ha_rows rows, org_rows;
|
||||
ulong *rec_per_key;
|
||||
THR_LOCK lock;
|
||||
pthread_mutex_t mutex;
|
||||
char *table_name;
|
||||
DB *status_block, *file, **key_file;
|
||||
u_int32_t *key_type;
|
||||
uint table_name_length,use_count;
|
||||
uint status,version;
|
||||
uint ref_length;
|
||||
bool fixed_length_primary_key, fixed_length_row;
|
||||
} BDB_SHARE;
|
||||
|
||||
|
||||
class ha_berkeley: public handler
|
||||
{
|
||||
THR_LOCK_DATA lock;
|
||||
DBT last_key,current_row;
|
||||
gptr alloc_ptr;
|
||||
byte *rec_buff;
|
||||
char *key_buff, *key_buff2, *primary_key_buff;
|
||||
DB *file, **key_file;
|
||||
DB_TXN *transaction;
|
||||
u_int32_t *key_type;
|
||||
DBC *cursor;
|
||||
BDB_SHARE *share;
|
||||
ulong int_table_flags;
|
||||
ulong alloced_rec_buff_length;
|
||||
ulong changed_rows;
|
||||
uint primary_key,last_dup_key, hidden_primary_key, version;
|
||||
bool key_read, using_ignore;
|
||||
bool fix_rec_buff_for_blob(ulong length);
|
||||
byte current_ident[BDB_HIDDEN_PRIMARY_KEY_LENGTH];
|
||||
|
||||
ulong max_row_length(const byte *buf);
|
||||
int pack_row(DBT *row,const byte *record, bool new_row);
|
||||
void unpack_row(char *record, DBT *row);
|
||||
void unpack_key(char *record, DBT *key, uint index);
|
||||
DBT *create_key(DBT *key, uint keynr, char *buff, const byte *record,
|
||||
int key_length = MAX_KEY_LENGTH);
|
||||
DBT *pack_key(DBT *key, uint keynr, char *buff, const byte *key_ptr,
|
||||
uint key_length);
|
||||
int remove_key(DB_TXN *trans, uint keynr, const byte *record, DBT *prim_key);
|
||||
int remove_keys(DB_TXN *trans,const byte *record, DBT *new_record,
|
||||
DBT *prim_key, key_map *keys);
|
||||
int restore_keys(DB_TXN *trans, key_map *changed_keys, uint primary_key,
|
||||
const byte *old_row, DBT *old_key,
|
||||
const byte *new_row, DBT *new_key);
|
||||
int key_cmp(uint keynr, const byte * old_row, const byte * new_row);
|
||||
int update_primary_key(DB_TXN *trans, bool primary_key_changed,
|
||||
const byte * old_row, DBT *old_key,
|
||||
const byte * new_row, DBT *prim_key,
|
||||
bool local_using_ignore);
|
||||
int read_row(int error, char *buf, uint keynr, DBT *row, DBT *key, bool);
|
||||
DBT *get_pos(DBT *to, byte *pos);
|
||||
|
||||
public:
|
||||
ha_berkeley(TABLE_SHARE *table_arg);
|
||||
~ha_berkeley() {}
|
||||
const char *table_type() const { return "BerkeleyDB"; }
|
||||
ulong index_flags(uint idx, uint part, bool all_parts) const;
|
||||
const char *index_type(uint key_number) { return "BTREE"; }
|
||||
const char **bas_ext() const;
|
||||
ulonglong table_flags(void) const { return int_table_flags; }
|
||||
uint max_supported_keys() const { return MAX_KEY-1; }
|
||||
uint extra_rec_buf_length() const { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; }
|
||||
ha_rows estimate_rows_upper_bound();
|
||||
uint max_supported_key_length() const { return UINT_MAX32; }
|
||||
uint max_supported_key_part_length() const { return UINT_MAX32; }
|
||||
|
||||
const key_map *keys_to_use_for_scanning() { return &key_map_full; }
|
||||
|
||||
int open(const char *name, int mode, uint test_if_locked);
|
||||
int close(void);
|
||||
double scan_time();
|
||||
int write_row(byte * buf);
|
||||
int update_row(const byte * old_data, byte * new_data);
|
||||
int delete_row(const byte * buf);
|
||||
int index_init(uint index, bool sorted);
|
||||
int index_end();
|
||||
int index_read(byte * buf, const byte * key,
|
||||
uint key_len, enum ha_rkey_function find_flag);
|
||||
int index_read_idx(byte * buf, uint index, const byte * key,
|
||||
uint key_len, enum ha_rkey_function find_flag);
|
||||
int index_read_last(byte * buf, const byte * key, uint key_len);
|
||||
int index_next(byte * buf);
|
||||
int index_next_same(byte * buf, const byte *key, uint keylen);
|
||||
int index_prev(byte * buf);
|
||||
int index_first(byte * buf);
|
||||
int index_last(byte * buf);
|
||||
int rnd_init(bool scan);
|
||||
int rnd_end();
|
||||
int rnd_next(byte *buf);
|
||||
int rnd_pos(byte * buf, byte *pos);
|
||||
void position(const byte *record);
|
||||
void info(uint);
|
||||
int extra(enum ha_extra_function operation);
|
||||
int reset(void);
|
||||
int external_lock(THD *thd, int lock_type);
|
||||
int start_stmt(THD *thd, thr_lock_type lock_type);
|
||||
void position(byte *record);
|
||||
int analyze(THD* thd,HA_CHECK_OPT* check_opt);
|
||||
int optimize(THD* thd, HA_CHECK_OPT* check_opt);
|
||||
int check(THD* thd, HA_CHECK_OPT* check_opt);
|
||||
|
||||
ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
|
||||
int create(const char *name, register TABLE *form,
|
||||
HA_CREATE_INFO *create_info);
|
||||
int delete_table(const char *name);
|
||||
int rename_table(const char* from, const char* to);
|
||||
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
|
||||
enum thr_lock_type lock_type);
|
||||
|
||||
void get_status();
|
||||
inline void get_auto_primary_key(byte *to)
|
||||
{
|
||||
pthread_mutex_lock(&share->mutex);
|
||||
share->auto_ident++;
|
||||
int5store(to,share->auto_ident);
|
||||
pthread_mutex_unlock(&share->mutex);
|
||||
}
|
||||
virtual void get_auto_increment(ulonglong offset, ulonglong increment,
|
||||
ulonglong nb_desired_values,
|
||||
ulonglong *first_value,
|
||||
ulonglong *nb_reserved_values);
|
||||
void print_error(int error, myf errflag);
|
||||
uint8 table_cache_type() { return HA_CACHE_TBL_TRANSACT; }
|
||||
bool primary_key_is_clustered() { return true; }
|
||||
int cmp_ref(const byte *ref1, const byte *ref2);
|
||||
bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
|
||||
};
|
||||
|
||||
extern const u_int32_t bdb_DB_TXN_NOSYNC;
|
||||
extern const u_int32_t bdb_DB_RECOVER;
|
||||
extern const u_int32_t bdb_DB_PRIVATE;
|
||||
extern const u_int32_t bdb_DB_DIRECT_DB;
|
||||
extern const u_int32_t bdb_DB_DIRECT_LOG;
|
||||
extern bool berkeley_shared_data;
|
||||
extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type,
|
||||
berkeley_lock_types[];
|
||||
extern ulong berkeley_max_lock, berkeley_log_buffer_size;
|
||||
extern ulonglong berkeley_cache_size;
|
||||
extern ulong berkeley_region_size, berkeley_cache_parts;
|
||||
extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
|
||||
extern long berkeley_lock_scan_time;
|
||||
extern TYPELIB berkeley_lock_typelib;
|
||||
|
||||
int berkeley_init(void);
|
||||
int berkeley_end(ha_panic_function type);
|
||||
bool berkeley_flush_logs(void);
|
||||
bool berkeley_show_status(THD *thd, stat_print_fn *print, enum ha_stat_type);
|
@ -1142,7 +1142,7 @@ bool ha_federated::create_where_from_key(String *to,
|
||||
Field *field= key_part->field;
|
||||
uint store_length= key_part->store_length;
|
||||
uint part_length= min(store_length, length);
|
||||
needs_quotes= 1;
|
||||
needs_quotes= field->str_needs_quotes();
|
||||
DBUG_DUMP("key, start of loop", (char *) ptr, length);
|
||||
|
||||
if (key_part->null_bit)
|
||||
@ -1663,12 +1663,15 @@ int ha_federated::write_row(byte *buf)
|
||||
{
|
||||
commas_added= TRUE;
|
||||
if ((*field)->is_null())
|
||||
insert_field_value_string.append(STRING_WITH_LEN(" NULL "));
|
||||
values_string.append(STRING_WITH_LEN(" NULL "));
|
||||
else
|
||||
{
|
||||
bool needs_quote= (*field)->str_needs_quotes();
|
||||
(*field)->val_str(&insert_field_value_string);
|
||||
if (needs_quote)
|
||||
values_string.append('\'');
|
||||
insert_field_value_string.print(&values_string);
|
||||
if (needs_quote)
|
||||
values_string.append('\'');
|
||||
|
||||
insert_field_value_string.length(0);
|
||||
@ -1676,10 +1679,6 @@ int ha_federated::write_row(byte *buf)
|
||||
/* append the field name */
|
||||
insert_string.append((*field)->field_name);
|
||||
|
||||
/* append the value */
|
||||
values_string.append(insert_field_value_string);
|
||||
insert_field_value_string.length(0);
|
||||
|
||||
/* append commas between both fields and fieldnames */
|
||||
/*
|
||||
unfortunately, we can't use the logic if *(fields + 1) to
|
||||
@ -1884,11 +1883,14 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
|
||||
update_string.append(STRING_WITH_LEN(" NULL "));
|
||||
else
|
||||
{
|
||||
my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
|
||||
/* otherwise = */
|
||||
my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set);
|
||||
bool needs_quote= (*field)->str_needs_quotes();
|
||||
(*field)->val_str(&field_value);
|
||||
if (needs_quote)
|
||||
update_string.append('\'');
|
||||
field_value.print(&update_string);
|
||||
if (needs_quote)
|
||||
update_string.append('\'');
|
||||
field_value.length(0);
|
||||
tmp_restore_column_map(table->read_set, old_map);
|
||||
@ -1903,11 +1905,14 @@ int ha_federated::update_row(const byte *old_data, byte *new_data)
|
||||
where_string.append(STRING_WITH_LEN(" IS NULL "));
|
||||
else
|
||||
{
|
||||
bool needs_quote= (*field)->str_needs_quotes();
|
||||
where_string.append(STRING_WITH_LEN(" = "));
|
||||
(*field)->val_str(&field_value,
|
||||
(char*) (old_data + (*field)->offset()));
|
||||
if (needs_quote)
|
||||
where_string.append('\'');
|
||||
field_value.print(&where_string);
|
||||
if (needs_quote)
|
||||
where_string.append('\'');
|
||||
field_value.length(0);
|
||||
}
|
||||
@ -1983,10 +1988,13 @@ int ha_federated::delete_row(const byte *buf)
|
||||
}
|
||||
else
|
||||
{
|
||||
bool needs_quote= cur_field->str_needs_quotes();
|
||||
delete_string.append(STRING_WITH_LEN(" = "));
|
||||
cur_field->val_str(&data_string);
|
||||
if (needs_quote)
|
||||
delete_string.append('\'');
|
||||
data_string.print(&delete_string);
|
||||
if (needs_quote)
|
||||
delete_string.append('\'');
|
||||
}
|
||||
delete_string.append(STRING_WITH_LEN(" AND "));
|
||||
|
@ -74,7 +74,6 @@ static const LEX_STRING sys_table_aliases[]=
|
||||
{
|
||||
{(char*)STRING_WITH_LEN("INNOBASE")}, {(char*)STRING_WITH_LEN("INNODB")},
|
||||
{(char*)STRING_WITH_LEN("NDB")}, {(char*)STRING_WITH_LEN("NDBCLUSTER")},
|
||||
{(char*)STRING_WITH_LEN("BDB")}, {(char*)STRING_WITH_LEN("BERKELEYDB")},
|
||||
{(char*)STRING_WITH_LEN("HEAP")}, {(char*)STRING_WITH_LEN("MEMORY")},
|
||||
{(char*)STRING_WITH_LEN("MERGE")}, {(char*)STRING_WITH_LEN("MRG_MYISAM")},
|
||||
{NullS, 0}
|
||||
@ -1508,7 +1507,7 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
|
||||
|
||||
/*
|
||||
Read first row (only) from a table
|
||||
This is never called for InnoDB or BDB tables, as these table types
|
||||
This is never called for InnoDB tables, as these table types
|
||||
has the HA_STATS_RECORDS_IS_EXACT set.
|
||||
*/
|
||||
|
||||
|
@ -82,10 +82,8 @@ static SYMBOL symbols[] = {
|
||||
{ "AVG", SYM(AVG_SYM)},
|
||||
{ "AVG_ROW_LENGTH", SYM(AVG_ROW_LENGTH)},
|
||||
{ "BACKUP", SYM(BACKUP_SYM)},
|
||||
{ "BDB", SYM(BERKELEY_DB_SYM)},
|
||||
{ "BEFORE", SYM(BEFORE_SYM)},
|
||||
{ "BEGIN", SYM(BEGIN_SYM)},
|
||||
{ "BERKELEYDB", SYM(BERKELEY_DB_SYM)},
|
||||
{ "BETWEEN", SYM(BETWEEN_SYM)},
|
||||
{ "BIGINT", SYM(BIGINT)},
|
||||
{ "BINARY", SYM(BINARY)},
|
||||
|
@ -1620,12 +1620,6 @@ extern handlerton innobase_hton;
|
||||
#else
|
||||
extern SHOW_COMP_OPTION have_innodb;
|
||||
#endif
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
extern handlerton berkeley_hton;
|
||||
#define have_berkeley_db berkeley_hton.state
|
||||
#else
|
||||
extern SHOW_COMP_OPTION have_berkeley_db;
|
||||
#endif
|
||||
#ifdef WITH_EXAMPLE_STORAGE_ENGINE
|
||||
extern handlerton example_hton;
|
||||
#define have_example_db example_hton.state
|
||||
|
157 sql/mysqld.cc
@ -310,7 +310,7 @@ static bool lower_case_table_names_used= 0;
|
||||
static bool volatile select_thread_in_use, signal_thread_in_use;
|
||||
static bool volatile ready_to_exit;
|
||||
static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0;
|
||||
static my_bool opt_bdb, opt_isam, opt_ndbcluster, opt_merge;
|
||||
static my_bool opt_isam, opt_ndbcluster, opt_merge;
|
||||
static my_bool opt_short_log_format= 0;
|
||||
static uint kill_cached_threads, wake_thread;
|
||||
static ulong killed_threads, thread_created;
|
||||
@ -332,10 +332,6 @@ static I_List<THD> thread_cache;
|
||||
|
||||
static pthread_cond_t COND_thread_cache, COND_flush_thread_cache;
|
||||
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
static my_bool opt_sync_bdb_logs;
|
||||
#endif
|
||||
|
||||
/* Global variables */
|
||||
|
||||
bool opt_update_log, opt_bin_log;
|
||||
@ -405,22 +401,6 @@ extern ulong srv_commit_concurrency;
|
||||
extern ulong srv_flush_log_at_trx_commit;
|
||||
}
|
||||
#endif
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
#ifndef HAVE_U_INT32_T
|
||||
typedef unsigned int u_int32_t;
|
||||
#endif
|
||||
extern const u_int32_t bdb_DB_TXN_NOSYNC, bdb_DB_RECOVER, bdb_DB_PRIVATE,
|
||||
bdb_DB_DIRECT_DB, bdb_DB_DIRECT_LOG;
|
||||
extern bool berkeley_shared_data;
|
||||
extern u_int32_t berkeley_init_flags,berkeley_env_flags, berkeley_lock_type,
|
||||
berkeley_lock_types[];
|
||||
extern ulong berkeley_max_lock, berkeley_log_buffer_size;
|
||||
extern ulonglong berkeley_cache_size;
|
||||
extern ulong berkeley_region_size, berkeley_cache_parts;
|
||||
extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
|
||||
extern long berkeley_lock_scan_time;
|
||||
extern TYPELIB berkeley_lock_typelib;
|
||||
#endif
|
||||
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
const char *opt_ndbcluster_connectstring= 0;
|
||||
@ -3355,11 +3335,7 @@ server.");
|
||||
|
||||
static void create_maintenance_thread()
|
||||
{
|
||||
if (
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
(have_berkeley_db == SHOW_OPTION_YES) ||
|
||||
#endif
|
||||
(flush_time && flush_time != ~(ulong) 0L))
|
||||
if (flush_time && flush_time != ~(ulong) 0L)
|
||||
{
|
||||
pthread_t hThread;
|
||||
if (pthread_create(&hThread,&connection_attrib,handle_manager,0))
|
||||
@ -4901,38 +4877,6 @@ struct my_option my_long_options[] =
|
||||
"Path to installation directory. All paths are usually resolved relative to this.",
|
||||
(gptr*) &mysql_home_ptr, (gptr*) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG,
|
||||
0, 0, 0, 0, 0, 0},
|
||||
{"bdb", OPT_BDB, "Enable Berkeley DB (if this version of MySQL supports it). \
|
||||
Disable with --skip-bdb (will save memory).",
|
||||
(gptr*) &opt_bdb, (gptr*) &opt_bdb, 0, GET_BOOL, NO_ARG, OPT_BDB_DEFAULT, 0, 0,
|
||||
0, 0, 0},
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
{"bdb-data-direct", OPT_BDB_DATA_DIRECT,
|
||||
"Turn off system buffering of BDB database files to avoid double caching.",
|
||||
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
|
||||
{"bdb-home", OPT_BDB_HOME, "Berkeley home directory.", (gptr*) &berkeley_home,
|
||||
(gptr*) &berkeley_home, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
|
||||
{"bdb-lock-detect", OPT_BDB_LOCK,
|
||||
"Berkeley lock detect (DEFAULT, OLDEST, RANDOM or YOUNGEST, # sec).",
|
||||
0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
|
||||
{"bdb-log-direct", OPT_BDB_LOG_DIRECT,
|
||||
"Turn off system buffering of BDB log files to avoid double caching.",
|
||||
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
|
||||
{"bdb-logdir", OPT_BDB_LOG, "Berkeley DB log file directory.",
|
||||
(gptr*) &berkeley_logdir, (gptr*) &berkeley_logdir, 0, GET_STR,
|
||||
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
|
||||
{"bdb-no-recover", OPT_BDB_NO_RECOVER,
|
||||
"Don't try to recover Berkeley DB tables on start.", 0, 0, 0, GET_NO_ARG,
|
||||
NO_ARG, 0, 0, 0, 0, 0, 0},
|
||||
{"bdb-no-sync", OPT_BDB_NOSYNC,
|
||||
"This option is deprecated, use --skip-sync-bdb-logs instead",
|
||||
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
|
||||
{"bdb-shared-data", OPT_BDB_SHARED,
|
||||
"Start Berkeley DB in multi-process mode.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0,
|
||||
0, 0, 0, 0, 0},
|
||||
{"bdb-tmpdir", OPT_BDB_TMP, "Berkeley DB tempfile name.",
|
||||
(gptr*) &berkeley_tmpdir, (gptr*) &berkeley_tmpdir, 0, GET_STR,
|
||||
REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
|
||||
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
|
||||
{"big-tables", OPT_BIG_TABLES,
|
||||
"Allow big result sets by saving all temporary sets on file (Solves most 'table full' errors).",
|
||||
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
|
||||
@ -5747,31 +5691,6 @@ log and this option does nothing anymore.",
|
||||
"The number of outstanding connection requests MySQL can have. This comes into play when the main MySQL thread gets very many connection requests in a very short time.",
|
||||
(gptr*) &back_log, (gptr*) &back_log, 0, GET_ULONG,
|
||||
REQUIRED_ARG, 50, 1, 65535, 0, 1, 0 },
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
{ "bdb_cache_parts", OPT_BDB_CACHE_PARTS,
|
||||
"Number of parts to use for BDB cache.",
|
||||
(gptr*) &berkeley_cache_parts, (gptr*) &berkeley_cache_parts, 0, GET_ULONG,
|
||||
REQUIRED_ARG, 1, 1, 1024, 0, 1, 0},
|
||||
{ "bdb_cache_size", OPT_BDB_CACHE_SIZE,
|
||||
"The buffer that is allocated to cache index and rows for BDB tables.",
|
||||
(gptr*) &berkeley_cache_size, (gptr*) &berkeley_cache_size, 0, GET_ULL,
|
||||
REQUIRED_ARG, KEY_CACHE_SIZE, 20*1024, (ulonglong) ~0, 0, IO_SIZE, 0},
|
||||
{"bdb_lock_max", OPT_BDB_MAX_LOCK, "Synonym for bdb_max_lock.",
|
||||
(gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
|
||||
REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
|
||||
{"bdb_log_buffer_size", OPT_BDB_LOG_BUFFER_SIZE,
|
||||
"The buffer that is allocated to cache index and rows for BDB tables.",
|
||||
(gptr*) &berkeley_log_buffer_size, (gptr*) &berkeley_log_buffer_size, 0,
|
||||
GET_ULONG, REQUIRED_ARG, 0, 256*1024L, ~0L, 0, 1024, 0},
|
||||
{"bdb_max_lock", OPT_BDB_MAX_LOCK,
|
||||
"The maximum number of locks you can have active on a BDB table.",
|
||||
(gptr*) &berkeley_max_lock, (gptr*) &berkeley_max_lock, 0, GET_ULONG,
|
||||
REQUIRED_ARG, 10000, 0, (long) ~0, 0, 1, 0},
|
||||
{"bdb_region_size", OPT_BDB_REGION_SIZE,
|
||||
"The size of the underlying logging area of the Berkeley DB environment.",
|
||||
(gptr*) &berkeley_region_size, (gptr*) &berkeley_region_size, 0, GET_ULONG,
|
||||
OPT_ARG, 60*1024L, 60*1024L, (long) ~0, 0, 1, 0},
|
||||
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
|
||||
{"binlog_cache_size", OPT_BINLOG_CACHE_SIZE,
|
||||
"The size of the cache to hold the SQL statements for the binary log during a transaction. If you often use big, multi-statement transactions you can increase this to get more performance.",
|
||||
(gptr*) &binlog_cache_size, (gptr*) &binlog_cache_size, 0, GET_ULONG,
|
||||
@ -6263,12 +6182,6 @@ The minimum value for this variable is 4096.",
|
||||
(gptr*) &max_system_variables.sortbuff_size, 0, GET_ULONG, REQUIRED_ARG,
|
||||
MAX_SORT_MEMORY, MIN_SORT_MEMORY+MALLOC_OVERHEAD*2, ~0L, MALLOC_OVERHEAD,
|
||||
1, 0},
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
{"sync-bdb-logs", OPT_BDB_SYNC,
|
||||
"Synchronously flush Berkeley DB logs. Enabled by default",
|
||||
(gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL,
|
||||
NO_ARG, 1, 0, 0, 0, 0, 0},
|
||||
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
|
||||
{"sync-binlog", OPT_SYNC_BINLOG,
|
||||
"Synchronously flush binary log to disk after every #th event. "
|
||||
"Use 0 (default) to disable synchronous flushing.",
|
||||
@ -7583,59 +7496,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
|
||||
have_merge_db= SHOW_OPTION_YES;
|
||||
else
|
||||
have_merge_db= SHOW_OPTION_DISABLED;
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
case OPT_BDB_NOSYNC:
|
||||
/* Deprecated option */
|
||||
opt_sync_bdb_logs= 0;
|
||||
/* Fall through */
|
||||
case OPT_BDB_SYNC:
|
||||
if (!opt_sync_bdb_logs)
|
||||
berkeley_env_flags|= bdb_DB_TXN_NOSYNC;
|
||||
else
|
||||
berkeley_env_flags&= ~bdb_DB_TXN_NOSYNC;
|
||||
break;
|
||||
case OPT_BDB_LOG_DIRECT:
|
||||
berkeley_env_flags|= bdb_DB_DIRECT_DB;
|
||||
break;
|
||||
case OPT_BDB_DATA_DIRECT:
|
||||
berkeley_env_flags|= bdb_DB_DIRECT_LOG;
|
||||
break;
|
||||
case OPT_BDB_NO_RECOVER:
|
||||
berkeley_init_flags&= ~(bdb_DB_RECOVER);
|
||||
break;
|
||||
case OPT_BDB_LOCK:
|
||||
{
|
||||
int type;
|
||||
if ((type=find_type(argument, &berkeley_lock_typelib, 2)) > 0)
|
||||
berkeley_lock_type=berkeley_lock_types[type-1];
|
||||
else
|
||||
{
|
||||
int err;
|
||||
char *end;
|
||||
uint length= strlen(argument);
|
||||
long value= my_strntol(&my_charset_latin1, argument, length, 10, &end, &err);
|
||||
if (end == argument+length)
|
||||
berkeley_lock_scan_time= value;
|
||||
else
|
||||
{
|
||||
fprintf(stderr,"Unknown lock type: %s\n",argument);
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case OPT_BDB_SHARED:
|
||||
berkeley_init_flags&= ~(bdb_DB_PRIVATE);
|
||||
berkeley_shared_data= 1;
|
||||
break;
|
||||
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
|
||||
case OPT_BDB:
|
||||
#ifdef WITH_BERKELEY_STORAGE_ENGINE
|
||||
if (opt_bdb)
|
||||
have_berkeley_db= SHOW_OPTION_YES;
|
||||
else
|
||||
have_berkeley_db= SHOW_OPTION_DISABLED;
|
||||
#endif
|
||||
break;
|
||||
case OPT_NDBCLUSTER:
|
||||
#ifdef WITH_NDBCLUSTER_STORAGE_ENGINE
|
||||
@ -7868,10 +7729,6 @@ static void get_options(int argc,char **argv)
|
||||
#ifndef WITH_ISAM_STORAGE_ENGINE
|
||||
if (opt_isam)
|
||||
sql_print_warning("this binary does not contain ISAM storage engine");
|
||||
#endif
|
||||
#ifndef WITH_BERKELEY_STORAGE_ENGINE
|
||||
if (opt_bdb)
|
||||
sql_print_warning("this binary does not contain BDB storage engine");
|
||||
#endif
|
||||
if ((opt_log_slow_admin_statements || opt_log_queries_not_using_indexes) &&
|
||||
!opt_slow_log)
|
||||
@ -8215,7 +8072,6 @@ void refresh_status(THD *thd)
|
||||
/*****************************************************************************
|
||||
Instantiate have_xyx for missing storage engines
|
||||
*****************************************************************************/
|
||||
#undef have_berkeley_db
|
||||
#undef have_innodb
|
||||
#undef have_ndbcluster
|
||||
#undef have_example_db
|
||||
@ -8225,7 +8081,6 @@ void refresh_status(THD *thd)
|
||||
#undef have_partition_db
|
||||
#undef have_blackhole_db
|
||||
|
||||
SHOW_COMP_OPTION have_berkeley_db= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_innodb= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_ndbcluster= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_example_db= SHOW_OPTION_NO;
|
||||
@ -8235,14 +8090,6 @@ SHOW_COMP_OPTION have_federated_db= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_partition_db= SHOW_OPTION_NO;
|
||||
SHOW_COMP_OPTION have_blackhole_db= SHOW_OPTION_NO;
|
||||
|
||||
#ifndef WITH_BERKELEY_STORAGE_ENGINE
|
||||
bool berkeley_shared_data;
|
||||
ulong berkeley_max_lock, berkeley_log_buffer_size;
|
||||
ulonglong berkeley_cache_size;
|
||||
ulong berkeley_region_size, berkeley_cache_parts;
|
||||
char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
|
||||
#endif
|
||||
|
||||
#ifndef WITH_INNOBASE_STORAGE_ENGINE
|
||||
uint innobase_flush_log_at_trx_commit;
|
||||
ulong innobase_fast_shutdown;
|
||||
|
@ -59,13 +59,6 @@
|
||||
|
||||
#include "event_scheduler.h"
|
||||
|
||||
/* WITH_BERKELEY_STORAGE_ENGINE */
|
||||
extern bool berkeley_shared_data;
|
||||
extern ulong berkeley_max_lock, berkeley_log_buffer_size;
|
||||
extern ulonglong berkeley_cache_size;
|
||||
extern ulong berkeley_region_size, berkeley_cache_parts;
|
||||
extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir;
|
||||
|
||||
/* WITH_INNOBASE_STORAGE_ENGINE */
|
||||
extern uint innobase_flush_log_at_trx_commit;
|
||||
extern ulong innobase_fast_shutdown;
|
||||
@ -669,7 +662,6 @@ sys_var_thd_time_zone sys_time_zone("time_zone");
|
||||
/* Read only variables */
|
||||
|
||||
sys_var_have_variable sys_have_archive_db("have_archive", &have_archive_db);
|
||||
sys_var_have_variable sys_have_berkeley_db("have_bdb", &have_berkeley_db);
|
||||
sys_var_have_variable sys_have_blackhole_db("have_blackhole_engine",
|
||||
&have_blackhole_db);
|
||||
sys_var_have_variable sys_have_compress("have_compress", &have_compress);
|
||||
@ -760,15 +752,6 @@ SHOW_VAR init_vars[]= {
|
||||
{sys_automatic_sp_privileges.name,(char*) &sys_automatic_sp_privileges, SHOW_SYS},
|
||||
{"back_log", (char*) &back_log, SHOW_LONG},
|
||||
{sys_basedir.name, (char*) &sys_basedir, SHOW_SYS},
|
||||
{"bdb_cache_parts", (char*) &berkeley_cache_parts, SHOW_LONG},
|
||||
{"bdb_cache_size", (char*) &berkeley_cache_size, SHOW_LONGLONG},
|
||||
{"bdb_home", (char*) &berkeley_home, SHOW_CHAR_PTR},
|
||||
{"bdb_log_buffer_size", (char*) &berkeley_log_buffer_size, SHOW_LONG},
|
||||
{"bdb_logdir", (char*) &berkeley_logdir, SHOW_CHAR_PTR},
|
||||
{"bdb_max_lock", (char*) &berkeley_max_lock, SHOW_LONG},
|
||||
{"bdb_region_size", (char*) &berkeley_region_size, SHOW_LONG},
|
||||
{"bdb_shared_data", (char*) &berkeley_shared_data, SHOW_BOOL},
|
||||
{"bdb_tmpdir", (char*) &berkeley_tmpdir, SHOW_CHAR_PTR},
|
||||
{sys_binlog_cache_size.name,(char*) &sys_binlog_cache_size, SHOW_SYS},
|
||||
{sys_binlog_format.name, (char*) &sys_binlog_format, SHOW_SYS},
|
||||
{sys_bulk_insert_buff_size.name,(char*) &sys_bulk_insert_buff_size,SHOW_SYS},
|
||||
@ -813,7 +796,6 @@ SHOW_VAR init_vars[]= {
|
||||
{sys_var_general_log_path.name, (char*) &sys_var_general_log_path, SHOW_SYS},
|
||||
{sys_group_concat_max_len.name, (char*) &sys_group_concat_max_len, SHOW_SYS},
|
||||
{sys_have_archive_db.name, (char*) &have_archive_db, SHOW_HAVE},
|
||||
{sys_have_berkeley_db.name, (char*) &have_berkeley_db, SHOW_HAVE},
|
||||
{sys_have_blackhole_db.name,(char*) &have_blackhole_db, SHOW_HAVE},
|
||||
{sys_have_compress.name, (char*) &have_compress, SHOW_HAVE},
|
||||
{sys_have_crypt.name, (char*) &have_crypt, SHOW_HAVE},
|
||||
|
@ -5580,269 +5580,374 @@ ER_SP_RECURSION_LIMIT
|
||||
ger "Rekursionsgrenze %d (durch Variable max_sp_recursion_depth gegeben) wurde für Routine %.64s überschritten"
|
||||
ER_SP_PROC_TABLE_CORRUPT
|
||||
eng "Failed to load routine %-.64s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)"
|
||||
ger "Routine %-64s konnte nicht geladen werden. Die Tabelle mysql.proc fehlt, ist beschädigt, oder enthält fehlerhaften Daten (interner Code: %d)"
|
||||
ER_SP_WRONG_NAME 42000
|
||||
eng "Incorrect routine name '%-.64s'"
|
||||
ger "Ungültiger Routinenname '%-.64s'"
|
||||
ER_TABLE_NEEDS_UPGRADE
|
||||
eng "Table upgrade required. Please do \"REPAIR TABLE `%-.32s`\" to fix it!"
|
||||
ger "Tabellenaktualisierung erforderlich. Bitte zum Reparieren \"REPAIR TABLE `%-.32s`\" eingeben!"
|
||||
ER_SP_NO_AGGREGATE 42000
|
||||
eng "AGGREGATE is not supported for stored functions"
|
||||
ger "AGGREGATE wird bei gespeicherten Funktionen nicht unterstützt"
|
||||
ER_MAX_PREPARED_STMT_COUNT_REACHED 42000
|
||||
eng "Can't create more than max_prepared_stmt_count statements (current value: %lu)"
|
||||
ger "Kann nicht mehr Anweisungen als max_prepared_stmt_count erzeugen (aktueller Wert: %lu)"
|
||||
ER_VIEW_RECURSIVE
|
||||
eng "`%-.64s`.`%-.64s` contains view recursion"
|
||||
ger "`%-.64s`.`%-.64s` enthält View-Rekursion"
|
||||
ER_NON_GROUPING_FIELD_USED 42000
|
||||
eng "non-grouping field '%-.64s' is used in %-.64s clause"
|
||||
ger "In der %-.64s-Klausel wird das die Nicht-Gruppierungsspalte '%-.64s' verwendet"
|
||||
ER_TABLE_CANT_HANDLE_SPKEYS
|
||||
eng "The used table type doesn't support SPATIAL indexes"
|
||||
ger "Der verwendete Tabellentyp unterstützt keine SPATIAL-Indizes"
|
||||
ER_ILLEGAL_HA_CREATE_OPTION
|
||||
eng "Table storage engine '%-.64s' does not support the create option '%.64s'"
|
||||
ger "Speicher-Engine '%-.64s' der Tabelle unterst<73>tzt die Option '%.64s' nicht"
|
||||
ER_PARTITION_REQUIRES_VALUES_ERROR
|
||||
eng "%-.64s PARTITIONING requires definition of VALUES %-.64s for each partition"
|
||||
ger "%-.64s-PARTITIONierung erfordert Definition von VALUES %-.64s f<>r jede Partition"
|
||||
swe "%-.64s PARTITIONering kr<6B>ver definition av VALUES %-.64s f<>r varje partition"
|
||||
ER_PARTITION_WRONG_VALUES_ERROR
|
||||
eng "Only %-.64s PARTITIONING can use VALUES %-.64s in partition definition"
|
||||
ger "Nur %-.64s-PARTITIONierung kann VALUES %-.64s in der Partitionsdefinition verwenden"
|
||||
swe "Endast %-.64s partitionering kan anv<6E>nda VALUES %-.64s i definition av partitionen"
|
||||
ER_PARTITION_MAXVALUE_ERROR
|
||||
eng "MAXVALUE can only be used in last partition definition"
|
||||
ger "MAXVALUE kann nur f<>r die Definition der letzten Partition verwendet werden"
|
||||
swe "MAXVALUE kan bara anv<6E>ndas i definitionen av den sista partitionen"
|
||||
ER_PARTITION_SUBPARTITION_ERROR
|
||||
eng "Subpartitions can only be hash partitions and by key"
|
||||
ger "Unterpartitionen d<>rfen nur HASH- oder KEY-Partitionen sein"
|
||||
swe "Subpartitioner kan bara vara hash och key partitioner"
|
||||
ER_PARTITION_SUBPART_MIX_ERROR
|
||||
eng "Must define subpartitions on all partitions if on one partition"
|
||||
ger "Unterpartitionen k<>nnen nur Hash- oder Key-Partitionen sein"
|
||||
swe "Subpartitioner måste definieras på alla partitioner om på en"
|
||||
|
||||
ER_PARTITION_WRONG_NO_PART_ERROR
|
||||
eng "Wrong number of partitions defined, mismatch with previous setting"
|
||||
ger "Falsche Anzahl von Partitionen definiert, stimmt nicht mit vorherigen Einstellungen <20>berein"
|
||||
swe "Antal partitioner definierade och antal partitioner <20>r inte lika"
|
||||
ER_PARTITION_WRONG_NO_SUBPART_ERROR
|
||||
eng "Wrong number of subpartitions defined, mismatch with previous setting"
|
||||
ger "Falsche Anzahl von Unterpartitionen definiert, stimmt nicht mit vorherigen Einstellungen <20>berein"
|
||||
swe "Antal subpartitioner definierade och antal subpartitioner <20>r inte lika"
|
||||
ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR
|
||||
eng "Constant/Random expression in (sub)partitioning function is not allowed"
|
||||
ger "Konstante oder Random-Ausdr<64>cke in (Unter-)Partitionsfunktionen sind nicht erlaubt"
|
||||
swe "Konstanta uttryck eller slumpm<70>ssiga uttryck <20>r inte till<6C>tna (sub)partitioneringsfunktioner"
|
||||
ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR
|
||||
eng "Expression in RANGE/LIST VALUES must be constant"
|
||||
ger "Ausdr<64>cke in RANGE/LIST VALUES m<>ssen konstant sein"
|
||||
swe "Uttryck i RANGE/LIST VALUES m<>ste vara ett konstant uttryck"
|
||||
ER_FIELD_NOT_FOUND_PART_ERROR
|
||||
eng "Field in list of fields for partition function not found in table"
|
||||
ger "Felder in der Feldliste der Partitionierungsfunktion wurden in der Tabelle nicht gefunden"
|
||||
swe "F<>lt i listan av f<>lt f<>r partitionering med key inte funnen i tabellen"
|
||||
ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR
|
||||
eng "List of fields is only allowed in KEY partitions"
|
||||
ger "Eine Feldliste ist nur in KEY-Partitionen erlaubt"
|
||||
swe "En lista av f<>lt <20>r endast till<6C>tet f<>r KEY partitioner"
|
||||
ER_INCONSISTENT_PARTITION_INFO_ERROR
|
||||
eng "The partition info in the frm file is not consistent with what can be written into the frm file"
|
||||
ger "Die Partitionierungsinformationen in der frm-Datei stimmen nicht mit dem <20>berein, was in die frm-Datei geschrieben werden kann"
|
||||
swe "Partitioneringsinformationen i frm-filen <20>r inte konsistent med vad som kan skrivas i frm-filen"
|
||||
ER_PARTITION_FUNC_NOT_ALLOWED_ERROR
|
||||
eng "The %-.64s function returns the wrong type"
|
||||
ger "Die %-.64s-Funktion gibt einen falschen Typ zur<75>ck"
|
||||
swe "%-.64s-funktionen returnerar felaktig typ"
|
||||
ER_PARTITIONS_MUST_BE_DEFINED_ERROR
|
||||
eng "For %-.64s partitions each partition must be defined"
|
||||
ger "F<>r %-.64s-Partitionen muss jede Partition definiert sein"
|
||||
swe "F<>r %-.64s partitionering s<> m<>ste varje partition definieras"
|
||||
ER_RANGE_NOT_INCREASING_ERROR
|
||||
eng "VALUES LESS THAN value must be strictly increasing for each partition"
|
||||
ger "Werte in VALUES LESS THAN m<>ssen f<>r jede Partition strikt aufsteigend sein"
|
||||
swe "V<>rden i VALUES LESS THAN m<>ste vara strikt v<>xande f<>r varje partition"
|
||||
ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR
|
||||
eng "VALUES value must be of same type as partition function"
|
||||
ger "VALUES-Werte m<>ssen vom selben Typ wie die Partitionierungsfunktion sein"
|
||||
swe "V<>rden i VALUES m<>ste vara av samma typ som partitioneringsfunktionen"
|
||||
ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR
|
||||
eng "Multiple definition of same constant in list partitioning"
|
||||
ger "Mehrfachdefinition derselben Konstante bei Listen-Partitionierung"
|
||||
swe "Multipel definition av samma konstant i list partitionering"
|
||||
ER_PARTITION_ENTRY_ERROR
|
||||
eng "Partitioning can not be used stand-alone in query"
|
||||
ger "Partitionierung kann in einer Abfrage nicht alleinstehend benutzt werden"
|
||||
swe "Partitioneringssyntax kan inte anv<6E>ndas p<> egen hand i en SQL-fr<66>ga"
|
||||
ER_MIX_HANDLER_ERROR
|
||||
eng "The mix of handlers in the partitions is not allowed in this version of MySQL"
|
||||
ger "Das Vermischen von Handlern in Partitionen ist in dieser Version von MySQL nicht erlaubt"
|
||||
swe "Denna mix av lagringsmotorer <20>r inte till<6C>ten i denna version av MySQL"
|
||||
ER_PARTITION_NOT_DEFINED_ERROR
|
||||
eng "For the partitioned engine it is necessary to define all %-.64s"
|
||||
ger "F<>r die partitionierte Engine m<>ssen alle %-.64s definiert sein"
|
||||
swe "F<>r partitioneringsmotorn s<> <20>r det n<>dv<64>ndigt att definiera alla %-.64s"
|
||||
ER_TOO_MANY_PARTITIONS_ERROR
|
||||
eng "Too many partitions (including subpartitions) were defined"
|
||||
ger "Es wurden zu vielen Partitionen (einschlie<69>lich Unterpartitionen) definiert"
|
||||
swe "F<>r m<>nga partitioner (inkluderande subpartitioner) definierades"
|
||||
ER_SUBPARTITION_ERROR
eng "It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning"
ger "RANGE/LIST-Partitionierung kann bei Unterpartitionen nur zusammen mit HASH/KEY-Partitionierung verwendet werden"
swe "Det är endast möjligt att blanda RANGE/LIST partitionering med HASH/KEY partitionering för subpartitionering"
ER_CANT_CREATE_HANDLER_FILE
eng "Failed to create specific handler file"
ger "Erzeugen einer spezifischen Handler-Datei fehlgeschlagen"
swe "Misslyckades med att skapa specifik fil i lagringsmotor"
ER_BLOB_FIELD_IN_PART_FUNC_ERROR
eng "A BLOB field is not allowed in partition function"
ger "In der Partitionierungsfunktion sind BLOB-Spalten nicht erlaubt"
swe "Ett BLOB-fält är inte tillåtet i partitioneringsfunktioner"
ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF
eng "A %-.64s need to include all fields in the partition function"
ger "Ein %-.64s muss alle Spalten der Partitionierungsfunktion umfassen"
swe "En %-.64s behöver inkludera alla fält i partitioneringsfunktionen för denna lagringsmotor"
ER_NO_PARTS_ERROR
eng "Number of %-.64s = 0 is not an allowed value"
ger "Eine Anzahl von %-.64s = 0 ist kein erlaubter Wert"
swe "Antal %-.64s = 0 är inte ett tillåten värde"
ER_PARTITION_MGMT_ON_NONPARTITIONED
eng "Partition management on a not partitioned table is not possible"
ger "Partitionsverwaltung einer nicht partitionierten Tabelle ist nicht möglich"
swe "Partitioneringskommando på en opartitionerad tabell är inte möjligt"
ER_FOREIGN_KEY_ON_PARTITIONED
eng "Foreign key condition is not yet supported in conjunction with partitioning"
ger "Fremdschlüssel-Beschränkungen sind im Zusammenhang mit Partitionierung nicht zulässig"
swe "Foreign key villkor är inte ännu implementerad i kombination med partitionering"
ER_DROP_PARTITION_NON_EXISTENT
eng "Error in list of partitions to %-.64s"
ger "Fehler in der Partitionsliste bei %-.64s"
swe "Fel i listan av partitioner att %-.64s"
ER_DROP_LAST_PARTITION
eng "Cannot remove all partitions, use DROP TABLE instead"
ger "Es lassen sich nicht sämtliche Partitionen löschen, benutzen Sie statt dessen DROP TABLE"
swe "Det är inte tillåtet att ta bort alla partitioner, använd DROP TABLE istället"
ER_COALESCE_ONLY_ON_HASH_PARTITION
eng "COALESCE PARTITION can only be used on HASH/KEY partitions"
ger "COALESCE PARTITION kann nur auf HASH- oder KEY-Partitionen benutzt werden"
swe "COALESCE PARTITION kan bara användas på HASH/KEY partitioner"
ER_REORG_HASH_ONLY_ON_SAME_NO
eng "REORGANISE PARTITION can only be used to reorganise partitions not to change their numbers"
ger "REORGANIZE PARTITION kann nur zur Reorganisation von Partitionen verwendet werden, nicht, um ihre Nummern zu ändern"
swe "REORGANISE PARTITION kan bara användas för att omorganisera partitioner, inte för att ändra deras antal"
ER_REORG_NO_PARAM_ERROR
eng "REORGANISE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs"
ger "REORGANIZE PARTITION ohne Parameter kann nur für auto-partitionierte Tabellen verwendet werden, die HASH-Partitionierung benutzen"
swe "REORGANISE PARTITION utan parametrar kan bara användas på auto-partitionerade tabeller som använder HASH partitionering"
ER_ONLY_ON_RANGE_LIST_PARTITION
eng "%-.64s PARTITION can only be used on RANGE/LIST partitions"
ger "%-.64s PARTITION kann nur für RANGE- oder LIST-Partitionen verwendet werden"
swe "%-.64s PARTITION kan bara användas på RANGE/LIST-partitioner"
ER_ADD_PARTITION_SUBPART_ERROR
eng "Trying to Add partition(s) with wrong number of subpartitions"
ger "Es wurde versucht, eine oder mehrere Partitionen mit der falschen Anzahl von Unterpartitionen hinzuzufügen"
swe "ADD PARTITION med fel antal subpartitioner"
ER_ADD_PARTITION_NO_NEW_PARTITION
eng "At least one partition must be added"
ger "Es muss zumindest eine Partition hinzugefügt werden"
swe "Åtminstone en partition måste läggas till vid ADD PARTITION"
ER_COALESCE_PARTITION_NO_PARTITION
eng "At least one partition must be coalesced"
ger "Zumindest eine Partition muss mit COALESCE PARTITION zusammengefügt werden"
swe "Åtminstone en partition måste slås ihop vid COALESCE PARTITION"
ER_REORG_PARTITION_NOT_EXIST
eng "More partitions to reorganise than there are partitions"
ger "Es wurde versucht, mehr Partitionen als vorhanden zu reorganisieren"
swe "Fler partitioner att reorganisera än det finns partitioner"
ER_SAME_NAME_PARTITION
eng "Duplicate partition name %-.64s"
ger "Doppelter Partitionsname: %-.64s"
swe "Duplicerat partitionsnamn %-.64s"
ER_NO_BINLOG_ERROR
eng "It is not allowed to shut off binlog on this command"
ger "Es es nicht erlaubt, bei diesem Befehl binlog abzuschalten"
swe "Det är inte tillåtet att stänga av binlog på detta kommando"
ER_CONSECUTIVE_REORG_PARTITIONS
eng "When reorganising a set of partitions they must be in consecutive order"
ger "Bei der Reorganisation eines Satzes von Partitionen müssen diese in geordneter Reihenfolge vorliegen"
swe "När ett antal partitioner omorganiseras måste de vara i konsekutiv ordning"
ER_REORG_OUTSIDE_RANGE
eng "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range"
ger "Die Reorganisation von RANGE-Partitionen kann Gesamtbereiche nicht verändern, mit Ausnahme der letzten Partition, die den Bereich erweitern kann"
swe "Reorganisering av rangepartitioner kan inte ändra den totala intervallet utom för den sista partitionen där intervallet kan utökas"
ER_PARTITION_FUNCTION_FAILURE
eng "Partition function not supported in this version for this handler"
ger "Partitionsfunktion in dieser Version dieses Handlers nicht unterstützt"
ER_PART_STATE_ERROR
eng "Partition state cannot be defined from CREATE/ALTER TABLE"
ger "Partitionszustand kann nicht von CREATE oder ALTER TABLE aus definiert werden"
swe "Partition state kan inte definieras från CREATE/ALTER TABLE"
ER_LIMITED_PART_RANGE
eng "The %-.64s handler only supports 32 bit integers in VALUES"
ger "Der Handler %-.64s unterstützt in VALUES nur 32-Bit-Integers"
swe "%-.64s stödjer endast 32 bitar i integers i VALUES"
ER_PLUGIN_IS_NOT_LOADED
eng "Plugin '%-.64s' is not loaded"
ger "Plugin '%-.64s' ist nicht geladen"
ER_WRONG_VALUE
eng "Incorrect %-.32s value: '%-.128s'"
ger "Falscher %-.32s-Wert: '%-.128s'"
ER_NO_PARTITION_FOR_GIVEN_VALUE
eng "Table has no partition for value %-.64s"
ger "Tabelle hat für den Wert %-.64s keine Partition"
ER_FILEGROUP_OPTION_ONLY_ONCE
eng "It is not allowed to specify %s more than once"
ger "%s darf nicht mehr als einmal angegegeben werden"
ER_CREATE_FILEGROUP_FAILED
eng "Failed to create %s"
ger "Anlegen von %s fehlgeschlagen"
ER_DROP_FILEGROUP_FAILED
eng "Failed to drop %s"
ger "Löschen (drop) von %s fehlgeschlagen"
ER_TABLESPACE_AUTO_EXTEND_ERROR
eng "The handler doesn't support autoextend of tablespaces"
ger "Der Handler unterstützt keine automatische Erweiterung (Autoextend) von Tablespaces"
ER_WRONG_SIZE_NUMBER
eng "A size parameter was incorrectly specified, either number or on the form 10M"
ger "Ein Größen-Parameter wurde unkorrekt angegeben, muss entweder Zahl sein oder im Format 10M"
ER_SIZE_OVERFLOW_ERROR
eng "The size number was correct but we don't allow the digit part to be more than 2 billion"
ger "Die Zahl für die Größe war korrekt, aber der Zahlanteil darf nicht größer als 2 Milliarden sein"
ER_ALTER_FILEGROUP_FAILED
eng "Failed to alter: %s"
ger "Änderung von %s fehlgeschlagen"
ER_BINLOG_ROW_LOGGING_FAILED
eng "Writing one row to the row-based binary log failed"
ger "Schreiben einer Zeilen ins zeilenbasierte Binärlog fehlgeschlagen"
ER_BINLOG_ROW_WRONG_TABLE_DEF
eng "Table definition on master and slave does not match"
ger "Tabellendefinition auf Master und Slave stimmt nicht überein"
ER_BINLOG_ROW_RBR_TO_SBR
eng "Slave running with --log-slave-updates must use row-based binary logging to be able to replicate row-based binary log events"
ger "Slave, die mit --log-slave-updates laufen, müssen zeilenbasiertes Loggen verwenden, um zeilenbasierte Binärlog-Ereignisse loggen zu können"
ER_EVENT_ALREADY_EXISTS
eng "Event '%-.64s' already exists"
ger "Event '%-.64s' existiert bereits"
ER_EVENT_STORE_FAILED
eng "Failed to store event %s. Error code %d from storage engine."
ger "Speichern von Event %s fehlgeschlagen. Fehlercode der Speicher-Engine: %d"
ER_EVENT_DOES_NOT_EXIST
eng "Unknown event '%-.64s'"
ger "Unbekanntes Event '%-.64s'"
ER_EVENT_CANT_ALTER
eng "Failed to alter event '%-.64s'"
ger "Ändern des Events '%-.64s' fehlgeschlagen"
ER_EVENT_DROP_FAILED
eng "Failed to drop %s"
ger "Löschen von %s fehlgeschlagen"
ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG
eng "INTERVAL is either not positive or too big"
ger "INTERVAL ist entweder nicht positiv oder zu groß"
ER_EVENT_ENDS_BEFORE_STARTS
eng "ENDS is either invalid or before STARTS"
ger "ENDS ist entweder ungültig oder liegt vor STARTS"
ER_EVENT_EXEC_TIME_IN_THE_PAST
eng "Activation (AT) time is in the past"
ger "Aktivierungszeit (AT) liegt in der Vergangenheit"
ER_EVENT_OPEN_TABLE_FAILED
eng "Failed to open mysql.event"
ger "Öffnen von mysql.event fehlgeschlagen"
ER_EVENT_NEITHER_M_EXPR_NOR_M_AT
eng "No datetime expression provided"
ger "Kein DATETIME-Ausdruck angegeben"
ER_COL_COUNT_DOESNT_MATCH_CORRUPTED
eng "Column count of mysql.%s is wrong. Expected %d, found %d. Table probably corrupted"
ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d gefunden. Tabelle ist wahrscheinlich beschädigt"
ER_CANNOT_LOAD_FROM_TABLE
eng "Cannot load from mysql.%s. Table probably corrupted. See error log."
ger "Kann mysql.%s nicht einlesen. Tabelle ist wahrscheinlich beschädigt, siehe Fehlerlog"
ER_EVENT_CANNOT_DELETE
eng "Failed to delete the event from mysql.event"
ger "Löschen des Events aus mysql.event fehlgeschlagen"
ER_EVENT_COMPILE_ERROR
eng "Error during compilation of event's body"
ger "Fehler beim Kompilieren des Event-Bodys"
ER_EVENT_SAME_NAME
eng "Same old and new event name"
ger "Alter und neuer Event-Name sind gleich"
ER_EVENT_DATA_TOO_LONG
eng "Data for column '%s' too long"
ger "Daten der Spalte '%s' zu lang"
ER_DROP_INDEX_FK
eng "Cannot drop index '%-.64s': needed in a foreign key constraint"
ger "Kann Index '%-.64s' nicht löschen: wird für einen Fremdschlüssel benötigt"
ER_WARN_DEPRECATED_SYNTAX
eng "The syntax '%s' is deprecated and will be removed in MySQL %s. Please use %s instead."
eng "The syntax '%s' is deprecated and will be removed in MySQL %s. Please use %s instead"
ger "Die Syntax '%s' ist veraltet und wird in MySQL %s entfernt. Bitte benutzen Sie statt dessen %s"
ER_CANT_WRITE_LOCK_LOG_TABLE
eng "You can't write-lock a log table. Only read access is possible."
eng "You can't write-lock a log table. Only read access is possible"
ger "Eine Log-Tabelle kann nicht schreibgesperrt werden. Es ist ohnehin nur Lesezugriff möglich"
ER_CANT_READ_LOCK_LOG_TABLE
eng "You can't use usual read lock with log tables. Try READ LOCAL instead."
eng "You can't use usual read lock with log tables. Try READ LOCAL instead"
ger "Log-Tabellen können nicht mit normalen Lesesperren gesperrt werden. Verwenden Sie statt dessen READ LOCAL"
ER_FOREIGN_DUPLICATE_KEY 23000 S1009
eng "Upholding foreign key constraints for table '%.64s', entry '%-.64s', key %d would lead to a duplicate entry"
ger "Aufrechterhalten der Fremdschlüssel-Constraints für Tabelle '%.64s', Eintrag '%-.64s', Schlüssel %d würde zu einem doppelten Eintrag führen"
ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use scripts/mysql_fix_privilege_tables"
ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MySQL %d, jetzt unter %d. Bitte benutzen Sie scripts/mysql_fix_privilege_tables, um den Fehler zu beheben"
ER_REMOVED_SPACES
eng "Leading spaces are removed from name '%s'"
ger "Führende Leerzeichen werden aus dem Namen '%s' entfernt"
ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR
eng "Cannot switch out of the row-based binary log format when the session has open temporary tables"
ger "Kann nicht aus dem zeilenbasierten Binärlog-Format herauswechseln, wenn die Sitzung offene temporäre Tabellen hat"
ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT
eng "Cannot change the binary logging format inside a stored function or trigger"
ger "Das Binärlog-Format kann innerhalb einer gespeicherten Funktion oder eines Triggers nicht geändert werden"
ER_NDB_CANT_SWITCH_BINLOG_FORMAT
eng "The NDB cluster engine does not support changing the binlog format on the fly yet"
ger "Die Speicher-Engine NDB Cluster unterstützt das Ändern des Binärlog-Formats zur Laufzeit noch nicht"
ER_PARTITION_NO_TEMPORARY
eng "Cannot create temporary table with partitions"
ger "Anlegen temporärer Tabellen mit Partitionen nicht möglich"
ER_PARTITION_CONST_DOMAIN_ERROR
eng "Partition constant is out of partition function domain"
ger "Partitionskonstante liegt außerhalb der Partitionsfunktionsdomäne"
swe "Partitionskonstanten är utanför partitioneringsfunktionens domän"
ER_PARTITION_FUNCTION_IS_NOT_ALLOWED
eng "This partition function is not allowed"
ger "Diese Partitionierungsfunktion ist nicht erlaubt"
swe "Denna partitioneringsfunktion är inte tillåten"
ER_DDL_LOG_ERROR
eng "Error in DDL log"
ger "Fehler im DDL-Log"
ER_NULL_IN_VALUES_LESS_THAN
eng "Not allowed to use NULL value in VALUES LESS THAN"
ger "In VALUES LESS THAN dürfen keine NULL-Werte verwendet werden"
swe "Det är inte tillåtet att använda NULL-värden i VALUES LESS THAN"
ER_WRONG_PARTITION_NAME
eng "Incorrect partition name"
ger "Falscher Partitionsname"
swe "Felaktigt partitionsnamn"
ER_CANT_CHANGE_TX_ISOLATION 25001
eng "Transaction isolation level can't be changed while a transaction is in progress"
ger "Transaktionsisolationsebene kann während einer laufenden Transaktion nicht geändert werden"
ER_DUP_ENTRY_AUTOINCREMENT_CASE
eng "ALTER TABLE causes auto_increment resequencing, resulting in duplicate entry '%-.64s' for key '%-.64s'"
ger "ALTER TABLE führt zur Neusequenzierung von auto_increment, wodurch der doppelte Eintrag '%-.64s' für Schlüssel '%-.64s' auftritt"
ER_EVENT_MODIFY_QUEUE_ERROR
eng "Internal scheduler error %d"
ger "Interner Scheduler-Fehler %d"
ER_EVENT_SET_VAR_ERROR
eng "Error during starting/stopping of the scheduler. Error code %u"
ger "Fehler während des Startens oder Anhalten des Schedulers. Fehlercode %u"
ER_PARTITION_MERGE_ERROR
eng "%s handler cannot be used in partitioned tables"
ger "%s-Handler kann in partitionierten Tabellen nicht verwendet werden"
swe "%s kan inte användas i en partitionerad tabell"
ER_CANT_ACTIVATE_LOG
eng "Cannot activate '%-.64s' log."
eng "Cannot activate '%-.64s' log"
ger "Kann Logdatei '%-.64s' nicht aktivieren"
ER_RBR_NOT_AVAILABLE
eng "The server was not built with row-based replication"
ger "Der Server hat keine zeilenbasierte Replikation"
ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA
eng "Triggers can not be created on system tables"
ger "Trigger können nicht auf Systemtabellen erzeugt werden"
ER_CANT_ALTER_LOG_TABLE
eng "You can't alter a log table if logging is enabled"
ER_BAD_LOG_ENGINE
@@ -23,7 +23,6 @@
*/

#include "mysql_priv.h"
#include "sql_manager.h"

ulong volatile manager_status;
bool volatile manager_thread_in_use;

@@ -1,19 +0,0 @@
/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

#ifdef WITH_BERKELEY_STORAGE_ENGINE
void berkeley_cleanup_log_files(void);
#endif /* WITH_BERKELEY_STORAGE_ENGINE */
@@ -146,7 +146,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token BEFORE_SYM
%token BEGIN_SYM
%token BENCHMARK_SYM
%token BERKELEY_DB_SYM
%token BIGINT
%token BINARY
%token BINLOG_SYM
@@ -8354,30 +8353,6 @@ show_param:
if (prepare_schema_table(YYTHD, lex, 0, SCH_COLLATIONS))
YYABORT;
}
| BERKELEY_DB_SYM LOGS_SYM
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS;
if (!(lex->create_info.db_type=
ha_resolve_by_legacy_type(YYTHD, DB_TYPE_BERKELEY_DB)))
{
my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "BerkeleyDB");
YYABORT;
}
WARN_DEPRECATED(yythd, "5.2", "SHOW BDB LOGS", "'SHOW ENGINE BDB LOGS'");
}
| LOGS_SYM
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_SHOW_ENGINE_LOGS;
if (!(lex->create_info.db_type=
ha_resolve_by_legacy_type(YYTHD, DB_TYPE_BERKELEY_DB)))
{
my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), "BerkeleyDB");
YYABORT;
}
WARN_DEPRECATED(yythd, "5.2", "SHOW LOGS", "'SHOW ENGINE BDB LOGS'");
}
| GRANTS
{
LEX *lex=Lex;
@@ -9408,7 +9383,6 @@ keyword_sp:
| AUTOEXTEND_SIZE_SYM {}
| AVG_ROW_LENGTH {}
| AVG_SYM {}
| BERKELEY_DB_SYM {}
| BINLOG_SYM {}
| BIT_SYM {}
| BOOL_SYM {}
@@ -1,67 +0,0 @@
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX")

INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/storage/bdb/build_win32
${CMAKE_SOURCE_DIR}/storage/bdb/dbinc
${CMAKE_SOURCE_DIR}/storage/bdb)

# BDB needs a number of source files that are auto-generated by the unix
# configure. So to build BDB, it is necessary to copy these over to the Windows
# bitkeeper tree, or to use a source .tar.gz package which already has these
# files.
ADD_LIBRARY(bdb crypto/aes_method.c btree/bt_compact.c btree/bt_compare.c
btree/bt_conv.c btree/bt_curadj.c btree/bt_cursor.c
btree/bt_delete.c btree/bt_method.c btree/bt_open.c btree/bt_put.c
btree/bt_rec.c btree/bt_reclaim.c btree/bt_recno.c
btree/bt_rsearch.c btree/bt_search.c btree/bt_split.c
btree/bt_stat.c btree/bt_upgrade.c btree/bt_verify.c
btree/btree_auto.c db/crdel_auto.c db/crdel_rec.c crypto/crypto.c
db/db.c db/db_am.c db/db_auto.c common/db_byteorder.c db/db_cam.c
common/db_clock.c db/db_conv.c db/db_dispatch.c db/db_dup.c
common/db_err.c common/db_getlong.c common/db_idspace.c
db/db_iface.c db/db_join.c common/db_log2.c db/db_meta.c
db/db_method.c db/db_open.c db/db_overflow.c db/db_ovfl_vrfy.c
db/db_pr.c db/db_rec.c db/db_reclaim.c db/db_remove.c
db/db_rename.c db/db_ret.c env/db_salloc.c db/db_setid.c
db/db_setlsn.c env/db_shash.c db/db_stati.c db/db_truncate.c
db/db_upg.c db/db_upg_opd.c db/db_vrfy.c db/db_vrfyutil.c
dbm/dbm.c dbreg/dbreg.c dbreg/dbreg_auto.c dbreg/dbreg_rec.c
dbreg/dbreg_stat.c dbreg/dbreg_util.c env/env_failchk.c
env/env_file.c env/env_method.c env/env_open.c env/env_recover.c
env/env_region.c env/env_register.c env/env_stat.c
fileops/fileops_auto.c fileops/fop_basic.c fileops/fop_rec.c
fileops/fop_util.c hash/hash.c hash/hash_auto.c hash/hash_conv.c
hash/hash_dup.c hash/hash_func.c hash/hash_meta.c
hash/hash_method.c hash/hash_open.c hash/hash_page.c
hash/hash_rec.c hash/hash_reclaim.c hash/hash_stat.c
hash/hash_upgrade.c hash/hash_verify.c hmac/hmac.c
hsearch/hsearch.c lock/lock.c lock/lock_deadlock.c
lock/lock_failchk.c lock/lock_id.c lock/lock_list.c
lock/lock_method.c lock/lock_region.c lock/lock_stat.c
lock/lock_timer.c lock/lock_util.c log/log.c log/log_archive.c
log/log_compare.c log/log_debug.c log/log_get.c log/log_method.c
log/log_put.c log/log_stat.c mp/mp_alloc.c mp/mp_bh.c mp/mp_fget.c
mp/mp_fmethod.c mp/mp_fopen.c mp/mp_fput.c mp/mp_fset.c
mp/mp_method.c mp/mp_region.c mp/mp_register.c mp/mp_stat.c
mp/mp_sync.c mp/mp_trickle.c crypto/mersenne/mt19937db.c
mutex/mut_alloc.c mutex/mut_method.c mutex/mut_region.c
mutex/mut_stat.c mutex/mut_tas.c mutex/mut_win32.c
os_win32/os_abs.c os/os_alloc.c os_win32/os_clock.c
os_win32/os_config.c os_win32/os_dir.c os_win32/os_errno.c
os_win32/os_fid.c os_win32/os_flock.c os_win32/os_fsync.c
os_win32/os_handle.c os/os_id.c os_win32/os_map.c os/os_method.c
os/os_oflags.c os_win32/os_open.c os/os_region.c
os_win32/os_rename.c os/os_root.c os/os_rpath.c os_win32/os_rw.c
os_win32/os_seek.c os_win32/os_sleep.c os_win32/os_spin.c
os_win32/os_stat.c os/os_tmpdir.c os_win32/os_truncate.c
os/os_unlink.c qam/qam.c qam/qam_auto.c qam/qam_conv.c
qam/qam_files.c qam/qam_method.c qam/qam_open.c qam/qam_rec.c
qam/qam_stat.c qam/qam_upgrade.c qam/qam_verify.c rep/rep_auto.c
rep/rep_backup.c rep/rep_elect.c rep/rep_log.c rep/rep_method.c
rep/rep_record.c rep/rep_region.c rep/rep_stat.c rep/rep_stub.c
rep/rep_util.c rep/rep_verify.c crypto/rijndael/rijndael-alg-fst.c
crypto/rijndael/rijndael-api-fst.c hmac/sha1.c clib/strcasecmp.c
txn/txn.c txn/txn_auto.c txn/txn_chkpt.c txn/txn_failchk.c
txn/txn_method.c txn/txn_rec.c txn/txn_recover.c txn/txn_region.c
txn/txn_stat.c txn/txn_util.c common/util_log.c common/util_sig.c
xa/xa.c xa/xa_db.c xa/xa_map.c)
@@ -1,102 +0,0 @@
/*-
* $Id: LICENSE,v 12.1 2005/06/16 20:20:10 bostic Exp $
*/

The following is the license that applies to this copy of the Berkeley DB
software. For a license to use the Berkeley DB software under conditions
other than those described here, or to purchase support for this software,
please contact Sleepycat Software by email at info@sleepycat.com, or on
the Web at http://www.sleepycat.com.

=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
/*
* Copyright (c) 1990-2005
* Sleepycat Software. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Redistributions in any form must be accompanied by information on
* how to obtain complete source code for the DB software and any
* accompanying software that uses the DB software. The source code
* must either be included in the distribution or be available for no
* more than the cost of distribution plus a nominal fee, and must be
* freely redistributable under reasonable conditions. For an
* executable file, complete source code means the source code for all
* modules it contains. It does not include source code for modules or
* files that typically accompany the major components of the operating
* system on which the executable file runs.
*
* THIS SOFTWARE IS PROVIDED BY SLEEPYCAT SOFTWARE ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
* NON-INFRINGEMENT, ARE DISCLAIMED. IN NO EVENT SHALL SLEEPYCAT SOFTWARE
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 1990, 1993, 1994, 1995
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Copyright (c) 1995, 1996
* The President and Fellows of Harvard University. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY HARVARD AND ITS CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL HARVARD OR ITS CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
@@ -1,56 +0,0 @@
# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

# Adaptor makefile to translate between what automake expects and what
# BDB provides (or vice versa).

srcdir = @srcdir@
top_srcdir = @top_srcdir@

# distdir and top_distdir are set by the calling Makefile

bdb_build = build_unix
files = LICENSE Makefile Makefile.in README CMakeLists.txt
subdirs = btree build_win32 clib common cxx db dbinc \
dbinc_auto db185 db_archive db_checkpoint db_deadlock db_dump \
db_dump185 db_hotbackup db_load db_printlog db_recover db_stat db_upgrade \
db_verify dbm dbreg dist env fileops hash \
hsearch hmac include lock log mp mutex os \
os_win32 qam rep txn xa sequence crypto

@SET_MAKE@

all:
cd $(bdb_build) && $(MAKE) all

clean:
cd $(bdb_build) && $(MAKE) clean

distclean:
cd $(bdb_build) && $(MAKE) distclean

# May want to fix this, and MYSQL/configure, to install things
install dvi check installcheck:

distdir:
for s in $(subdirs); do \
cp -pr $(srcdir)/$$s $(distdir)/$$s; \
done
for f in $(files); do \
test -f $(distdir)/$$f || cp -p $(srcdir)/$$f $(distdir)/$$f; \
done
mkdir $(distdir)/$(bdb_build)
cp -p $(srcdir)/$(bdb_build)/.IGNORE_ME $(distdir)/$(bdb_build)
@@ -1108,7 +1108,7 @@ int ha_tina::rnd_pos(byte * buf, byte *pos)
{
DBUG_ENTER("ha_tina::rnd_pos");
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
current_position= my_get_ptr(pos,ref_length);
current_position= (off_t)my_get_ptr(pos,ref_length);
DBUG_RETURN(find_current_row(buf));
}

@@ -280,7 +280,7 @@ static int ft_add_word(MYSQL_FTPARSER_PARAM *param,


static int ft_parse_internal(MYSQL_FTPARSER_PARAM *param,
byte *doc, int doc_len)
char *doc, int doc_len)
{
byte *end=doc+doc_len;
MY_FT_PARSER_PARAM *ft_param=param->mysql_ftparam;
@@ -65,7 +65,7 @@ for batch in t/* ; do
done

echo "=====================================" >> var/ft_test.log
$MYSQLD $OPTS --basedir=$BASE --skip-bdb --pid-file=$PID \
$MYSQLD $OPTS --basedir=$BASE --pid-file=$PID \
--language=$ROOT/sql/share/english \
--skip-grant-tables --skip-innodb \
--skip-networking --tmpdir=$DATA >> var/ft_test.log 2>&1 &
@@ -22,7 +22,6 @@
int mi_delete_all_rows(MI_INFO *info)
{
uint i;
char buf[22];
MYISAM_SHARE *share=info->s;
MI_STATE_INFO *state=&share->state;
DBUG_ENTER("mi_delete_all_rows");

@@ -1178,7 +1178,6 @@ static int _mi_read_rnd_mempack_record(MI_INFO*, byte *,my_off_t, my_bool);

my_bool _mi_memmap_file(MI_INFO *info)
{
byte *file_map;
MYISAM_SHARE *share=info->s;
DBUG_ENTER("mi_memmap_file");

@@ -357,14 +357,6 @@ myisam_repair_threads = 1
myisam_recover


# *** BDB Specific options ***

# Use this option if you run a MySQL server with BDB support enabled but
# you do not plan to use it. This will save memory and may speed up some
# things.
skip-bdb


# *** INNODB Specific options ***

# Use this option if you have a MySQL server with InnoDB support enabled
@@ -48,9 +48,6 @@ server-id = 1
# Uncomment the following if you want to log updates
#log-bin=mysql-bin

# Uncomment the following if you are NOT using BDB tables
#skip-bdb

# Uncomment the following if you are using InnoDB tables
#innodb_data_home_dir = @localstatedir@/
#innodb_data_file_path = ibdata1:10M:autoextend
@@ -37,3 +37,10 @@ directory and add the following to the Makefile.am in that directory
Note, it's important to have "-t" at the end of the filename, otherwise the
test won't be executed by 'make test' !


Documentation
-------------

There is Doxygen-generated documentation available at:

https://intranet.mysql.com/~mkindahl/mytap/html/
@@ -1,19 +1,5 @@
#!/usr/bin/perl

# Override _command_line in the standard Perl test harness to prevent
# it from using "perl" to run the test scripts.
package MySQL::Straps;

use base qw(Test::Harness::Straps);

use strict;

sub _command_line {
return $_[1]
}

package main;

use Test::Harness qw(&runtests $verbose);
use File::Find;

@@ -37,9 +23,6 @@ unit - Run unit tests in directory

my $cmd = shift;

# $Test::Harness::Verbose = 1;
# $Test::Harness::Debug = 1;

if (defined $cmd && exists $dispatch{$cmd}) {
$dispatch{$cmd}->(@ARGV);
} else {
@@ -95,14 +78,7 @@ sub run_cmd (@) {
if (@files > 0) {
# Removing the first './' from the file names
foreach (@files) { s!^\./!! }

# Install the strap above instead of the default strap. Since
# we are replacing the straps under the feet of Test::Harness,
# we need to do some basic initializations in the new straps.
$Test::Harness::Strap = MySQL::Straps->new;
$Test::Harness::Strap->{callback} = \&Test::Harness::strap_callback
if defined &Test::Harness::strap_callback;

$ENV{'HARNESS_PERL_SWITCHES'} .= q" -e 'exec @ARGV'";
runtests @files;
}
}
@@ -39,7 +39,6 @@ The options right now are
WITH_INNOBASE_STORAGE_ENGINE Enable particular storage engines
WITH_PARTITION_STORAGE_ENGINE
WITH_ARCHIVE_STORAGE_ENGINE
WITH_BERKELEY_STORAGE_ENGINE
WITH_BLACKHOLE_STORAGE_ENGINE
WITH_EXAMPLE_STORAGE_ENGINE
WITH_FEDERATED_STORAGE_ENGINE
@@ -24,7 +24,6 @@ try
switch (parts[0])
{
case "WITH_ARCHIVE_STORAGE_ENGINE":
case "WITH_BERKELEY_STORAGE_ENGINE":
case "WITH_BLACKHOLE_STORAGE_ENGINE":
case "WITH_EXAMPLE_STORAGE_ENGINE":
case "WITH_FEDERATED_STORAGE_ENGINE":
@@ -66,8 +65,6 @@ try

configfile.Close();

//ConfigureBDB();

fso = null;

WScript.Echo("done!");
@@ -135,32 +132,3 @@ function GetVersionId(version)
id += build;
return id;
}

function ConfigureBDB()
{
// read in the Unix configure.in file
var dbIncTS = fso.OpenTextFile("..\\bdb\\dbinc\\db.in", ForReading);
var dbIn = dbIncTS.ReadAll();
dbIncTS.Close();

dbIn = dbIn.replace("@DB_VERSION_MAJOR@", "$DB_VERSION_MAJOR");
dbIn = dbIn.replace("@DB_VERSION_MINOR@", "$DB_VERSION_MINOR");
dbIn = dbIn.replace("@DB_VERSION_PATCH@", "$DB_VERSION_PATCH");
dbIn = dbIn.replace("@DB_VERSION_STRING@", "$DB_VERSION_STRING");

dbIn = dbIn.replace("@u_int8_decl@", "typedef unsigned char u_int8_t;");
dbIn = dbIn.replace("@int16_decl@", "typedef short int16_t;");
dbIn = dbIn.replace("@u_int16_decl@", "typedef unsigned short u_int16_t;");
dbIn = dbIn.replace("@int32_decl@", "typedef int int32_t;");
dbIn = dbIn.replace("@u_int32_decl@", "typedef unsigned int u_int32_t;");

dbIn = dbIn.replace("@u_char_decl@", "{\r\n#if !defined(_WINSOCKAPI_)\r\n" +
"typedef unsigned char u_char;");
dbIn = dbIn.replace("@u_short_decl@", "typedef unsigned short u_short;");
dbIn = dbIn.replace("@u_int_decl@", "typedef unsigned int u_int;");
dbIn = dbIn.replace("@u_long_decl@", "typedef unsigned long u_long;");

dbIn = dbIn.replace("@ssize_t_decl@", "#endif\r\n#if defined(_WIN64)\r\n" +
"typedef __int64 ssize_t;\r\n#else\r\n" +
"typedef int ssize_t;\r\n#endif");
}