Additional storage engine called "blackhole". Customer request, and for that matter a Zawodny request. With this you can ALTER TABLE to a table type that never stores data. It's a /dev/null for a database.
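A minimal SQL sketch of the behaviour (the table and column names here are illustrative; the empty SELECT results match the new mysql-test/r/blackhole.result):

    CREATE TABLE t1 (a INT NOT NULL, b VARCHAR(32)) ENGINE=BLACKHOLE;
    INSERT INTO t1 VALUES (1, 'discarded');  -- the row is accepted but never stored
    SELECT * FROM t1;                        -- always returns an empty result set
    DROP TABLE t1;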
acinclude.m4 (30 lines changed)
@@ -1510,6 +1510,36 @@ dnl ---------------------------------------------------------------------------
 dnl END OF MYSQL_CHECK_EXAMPLE SECTION
 dnl ---------------------------------------------------------------------------

+dnl ---------------------------------------------------------------------------
+dnl Macro: MYSQL_CHECK_BLACKHOLEDB
+dnl Sets HAVE_BLACKHOLE_DB if --with-blackhole-storage-engine is used
+dnl ---------------------------------------------------------------------------
+AC_DEFUN([MYSQL_CHECK_BLACKHOLEDB], [
+  AC_ARG_WITH([blackhole-storage-engine],
+              [
+  --with-blackhole-storage-engine
+                          Enable the Blackhole Storage Engine],
+              [blackholedb="$withval"],
+              [blackholedb=no])
+  AC_MSG_CHECKING([for blackhole storage engine])
+
+  case "$blackholedb" in
+    yes )
+      AC_DEFINE([HAVE_BLACKHOLE_DB], [1], [Builds Blackhole DB])
+      AC_MSG_RESULT([yes])
+      [blackholedb=yes]
+      ;;
+    * )
+      AC_MSG_RESULT([no])
+      [blackholedb=no]
+      ;;
+  esac
+
+])
+dnl ---------------------------------------------------------------------------
+dnl END OF MYSQL_CHECK_BLACKHOLE SECTION
+dnl ---------------------------------------------------------------------------
+
 dnl ---------------------------------------------------------------------------
 dnl Macro: MYSQL_CHECK_ARCHIVEDB
 dnl Sets HAVE_ARCHIVE_DB if --with-archive-storage-engine is used

@@ -2835,6 +2835,7 @@ MYSQL_CHECK_INNODB
 MYSQL_CHECK_EXAMPLEDB
 MYSQL_CHECK_ARCHIVEDB
 MYSQL_CHECK_CSVDB
+MYSQL_CHECK_BLACKHOLEDB
 MYSQL_CHECK_NDBCLUSTER

 # If we have threads generate some library functions and test programs
mysql-test/include/have_blackhole.inc (new file, 4 lines)
-- require r/have_blackhole.require
disable_query_log;
show variables like "have_blackhole_engine";
enable_query_log;
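A sketch of how a test case would typically guard on this include (the statements are illustrative; the real test added by this commit is mysql-test/t/blackhole.test):

    -- source include/have_blackhole.inc
    # Skipped unless the server reports have_blackhole_engine = YES
    CREATE TABLE t1 (a INT) ENGINE=BLACKHOLE;
    INSERT INTO t1 VALUES (1);
    SELECT * FROM t1;
    DROP TABLE t1;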
mysql-test/r/blackhole.result (new file, 83 lines)
drop table if exists t1,t2;
CREATE TABLE t1 (
Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL,
Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL
) ENGINE=blackhole;
INSERT INTO t1 VALUES (9410,9412);
select period from t1;
period
select * from t1;
Period Varor_period
select t1.* from t1;
Period Varor_period
CREATE TABLE t2 (
auto int NOT NULL auto_increment,
fld1 int(6) unsigned zerofill DEFAULT '000000' NOT NULL,
companynr tinyint(2) unsigned zerofill DEFAULT '00' NOT NULL,
fld3 char(30) DEFAULT '' NOT NULL,
fld4 char(35) DEFAULT '' NOT NULL,
fld5 char(35) DEFAULT '' NOT NULL,
fld6 char(4) DEFAULT '' NOT NULL,
primary key (auto)
) ENGINE=blackhole;
select t2.fld3 from t2 where companynr = 58 and fld3 like "%imaginable%";
fld3
select fld3 from t2 where fld3 like "%cultivation" ;
fld3
select t2.fld3,companynr from t2 where companynr = 57+1 order by fld3;
fld3 companynr
select fld3,companynr from t2 where companynr = 58 order by fld3;
fld3 companynr
select fld3 from t2 order by fld3 desc limit 10;
fld3
select fld3 from t2 order by fld3 desc limit 5;
fld3
select fld3 from t2 order by fld3 desc limit 5,5;
fld3
select t2.fld3 from t2 where fld3 = 'honeysuckle';
fld3
select t2.fld3 from t2 where fld3 LIKE 'honeysuckl_';
fld3
select t2.fld3 from t2 where fld3 LIKE 'hon_ysuckl_';
fld3
select t2.fld3 from t2 where fld3 LIKE 'honeysuckle%';
fld3
select t2.fld3 from t2 where fld3 LIKE 'h%le';
fld3
select t2.fld3 from t2 where fld3 LIKE 'honeysuckle_';
fld3
select t2.fld3 from t2 where fld3 LIKE 'don_t_find_me_please%';
fld3
select t2.fld3 from t2 where fld3 >= 'honeysuckle' and fld3 <= 'honoring' order by fld3;
fld3
select fld1,fld3 from t2 where fld3="Colombo" or fld3 = "nondecreasing" order by fld3;
fld1 fld3
DROP TABLE t1;
CREATE TABLE t1 (a VARCHAR(200), b TEXT, FULLTEXT (a,b));
INSERT INTO t1 VALUES('MySQL has now support', 'for full-text search'),
('Full-text indexes', 'are called collections'),
('Only MyISAM tables','support collections'),
('Function MATCH ... AGAINST()','is used to do a search'),
('Full-text search in MySQL', 'implements vector space model');
SHOW INDEX FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
t1 1 a 1 a NULL NULL NULL NULL YES FULLTEXT
t1 1 a 2 b NULL NULL NULL NULL YES FULLTEXT
select * from t1 where MATCH(a,b) AGAINST ("collections");
a b
Only MyISAM tables support collections
Full-text indexes are called collections
explain extended select * from t1 where MATCH(a,b) AGAINST ("collections");
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 fulltext a a 0 1 Using where
Warnings:
Note 1003 select test.t1.a AS `a`,test.t1.b AS `b` from test.t1 where (match test.t1.a,test.t1.b against (_latin1'collections'))
select * from t1 where MATCH(a,b) AGAINST ("indexes");
a b
Full-text indexes are called collections
select * from t1 where MATCH(a,b) AGAINST ("indexes collections");
a b
Full-text indexes are called collections
Only MyISAM tables support collections
select * from t1 where MATCH(a,b) AGAINST ("only");
a b
mysql-test/r/have_blackhole.require (new file, 2 lines)
Variable_name Value
have_blackhole_engine YES
mysql-test/t/blackhole.test (new file, 1299 lines; contents not shown because the diff is too large)
sql/Makefile.am

@@ -60,7 +60,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \
                        stacktrace.h sql_sort.h sql_cache.h set_var.h \
                        spatial.h gstream.h client_settings.h tzfile.h \
                        tztime.h examples/ha_example.h examples/ha_archive.h \
-                       examples/ha_tina.h
+                       examples/ha_tina.h ha_blackhole.h
 mysqld_SOURCES =       sql_lex.cc sql_handler.cc \
                        item.cc item_sum.cc item_buff.cc item_func.cc \
                        item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \

@@ -92,7 +92,7 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \
                        gstream.cc spatial.cc sql_help.cc protocol_cursor.cc \
                        tztime.cc my_time.c \
                        examples/ha_example.cc examples/ha_archive.cc \
-                       examples/ha_tina.cc
+                       examples/ha_tina.cc ha_blackhole.cc

 gen_lex_hash_SOURCES = gen_lex_hash.cc
 gen_lex_hash_LDADD =   $(LDADD) $(CXXLDFLAGS)
sql/ha_blackhole.cc (new file, 164 lines)
/* Copyright (C) 2005 MySQL AB

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */


#ifdef __GNUC__
#pragma implementation        // gcc: Class implementation
#endif

#include <mysql_priv.h>

#ifdef HAVE_BLACKHOLE_DB
#include "ha_blackhole.h"


const char **ha_blackhole::bas_ext() const
{
  static const char *ext[]= { NullS };
  return ext;
}

int ha_blackhole::open(const char *name, int mode, uint test_if_locked)
{
  DBUG_ENTER("ha_blackhole::open");
  thr_lock_init(&thr_lock);
  thr_lock_data_init(&thr_lock,&lock,NULL);
  DBUG_RETURN(0);
}

int ha_blackhole::close(void)
{
  DBUG_ENTER("ha_blackhole::close");
  thr_lock_delete(&thr_lock);
  DBUG_RETURN(0);
}

int ha_blackhole::create(const char *name, TABLE *table_arg,
                         HA_CREATE_INFO *create_info)
{
  DBUG_ENTER("ha_blackhole::create");
  DBUG_RETURN(0);
}

const char *ha_blackhole::index_type(uint key_number)
{
  DBUG_ENTER("ha_blackhole::index_type");
  DBUG_RETURN((table->key_info[key_number].flags & HA_FULLTEXT) ?
              "FULLTEXT" :
              (table->key_info[key_number].flags & HA_SPATIAL) ?
              "SPATIAL" :
              (table->key_info[key_number].algorithm == HA_KEY_ALG_RTREE) ?
              "RTREE" :
              "BTREE");
}

int ha_blackhole::write_row(byte * buf)
{
  DBUG_ENTER("ha_blackhole::write_row");
  DBUG_RETURN(0);
}

int ha_blackhole::rnd_init(bool scan)
{
  DBUG_ENTER("ha_blackhole::rnd_init");
  DBUG_RETURN(0);
}


int ha_blackhole::rnd_next(byte *buf)
{
  DBUG_ENTER("ha_blackhole::rnd_next");
  DBUG_RETURN(HA_ERR_END_OF_FILE);
}


int ha_blackhole::rnd_pos(byte * buf, byte *pos)
{
  DBUG_ENTER("ha_blackhole::rnd_pos");
  DBUG_RETURN(0);
}


void ha_blackhole::position(const byte *record)
{
  DBUG_ENTER("ha_blackhole::position");
  DBUG_VOID_RETURN;
}


void ha_blackhole::info(uint flag)
{
  DBUG_ENTER("ha_blackhole::info");

  records= 0;
  deleted= 0;
  errkey= 0;
  mean_rec_length= 0;
  data_file_length= 0;
  index_file_length= 0;
  max_data_file_length= 0;
  delete_length= 0;
  if (flag & HA_STATUS_AUTO)
    auto_increment_value= 1;
  DBUG_VOID_RETURN;
}

int ha_blackhole::external_lock(THD *thd, int lock_type)
{
  DBUG_ENTER("ha_blackhole::external_lock");
  DBUG_RETURN(0);
}


THR_LOCK_DATA **ha_blackhole::store_lock(THD *thd,
                                         THR_LOCK_DATA **to,
                                         enum thr_lock_type lock_type)
{
  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
  {
    /*
      Here is where we get into the guts of a row level lock.
      If TL_UNLOCK is set
      If we are not doing a LOCK TABLE or DISCARD/IMPORT
      TABLESPACE, then allow multiple writers
    */

    if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
         lock_type <= TL_WRITE) && !thd->in_lock_tables
        && !thd->tablespace_op)
      lock_type = TL_WRITE_ALLOW_WRITE;

    /*
      In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
      MySQL would use the lock TL_READ_NO_INSERT on t2, and that
      would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
      to t2. Convert the lock to a normal read lock to allow
      concurrent inserts to t2.
    */

    if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables)
      lock_type = TL_READ;

    lock.type=lock_type;
  }

  *to++= &lock;

  return to;
}


#endif /* HAVE_BLACKHOLE_DB */
sql/ha_blackhole.h (new file, 79 lines)
/* Copyright (C) 2005 MySQL AB

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */

#ifdef __GNUC__
#pragma interface        /* gcc class implementation */
#endif

/*
  Class definition for the blackhole storage engine
  "Dumbest named feature ever"
*/
class ha_blackhole: public handler
{
  THR_LOCK_DATA lock;      /* MySQL lock */
  THR_LOCK thr_lock;

public:
  ha_blackhole(TABLE *table): handler(table)
  {
  }
  ~ha_blackhole()
  {
  }
  /* The name that will be used for display purposes */
  const char *table_type() const { return "BLACKHOLE"; }
  /*
    The name of the index type that will be used for display
    don't implement this method unless you really have indexes
  */
  const char *index_type(uint key_number);
  const char **bas_ext() const;
  ulong table_flags() const
  {
    return(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER |
           HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY |
           HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME |
           HA_CAN_INSERT_DELAYED);
  }
  ulong index_flags(uint inx, uint part, bool all_parts) const
  {
    return ((table->key_info[inx].algorithm == HA_KEY_ALG_FULLTEXT) ?
            0 : HA_READ_NEXT | HA_READ_PREV | HA_READ_RANGE |
            HA_READ_ORDER | HA_KEYREAD_ONLY);
  }
  /* The following defines can be increased if necessary */
#define BLACKHOLE_MAX_KEY        64    /* Max allowed keys */
#define BLACKHOLE_MAX_KEY_SEG    16    /* Max segments for key */
#define BLACKHOLE_MAX_KEY_LENGTH 1000
  uint max_supported_keys()            const { return BLACKHOLE_MAX_KEY; }
  uint max_supported_key_length()      const { return BLACKHOLE_MAX_KEY_LENGTH; }
  uint max_supported_key_part_length() const { return BLACKHOLE_MAX_KEY_LENGTH; }
  int open(const char *name, int mode, uint test_if_locked);
  int close(void);
  int write_row(byte * buf);
  int rnd_init(bool scan);
  int rnd_next(byte *buf);
  int rnd_pos(byte * buf, byte *pos);
  void position(const byte *record);
  void info(uint flag);
  int external_lock(THD *thd, int lock_type);
  int create(const char *name, TABLE *table_arg,
             HA_CREATE_INFO *create_info);
  THR_LOCK_DATA **store_lock(THD *thd,
                             THR_LOCK_DATA **to,
                             enum thr_lock_type lock_type);
};
sql/handler.cc

@@ -32,6 +32,9 @@
 #ifdef HAVE_BERKELEY_DB
 #include "ha_berkeley.h"
 #endif
+#ifdef HAVE_BLACKHOLE_DB
+#include "ha_blackhole.h"
+#endif
 #ifdef HAVE_EXAMPLE_DB
 #include "examples/ha_example.h"
 #endif

@@ -96,6 +99,8 @@ struct show_table_type_st sys_table_types[]=
   "Archive storage engine", DB_TYPE_ARCHIVE_DB},
   {"CSV",&have_csv_db,
   "CSV storage engine", DB_TYPE_CSV_DB},
+  {"BLACKHOLE",&have_blackhole_db,
+  "Storage engine designed to act as null storage", DB_TYPE_BLACKHOLE_DB},
   {NullS, NULL, NullS, DB_TYPE_UNKNOWN}
 };

@@ -204,6 +209,10 @@ handler *get_new_handler(TABLE *table, enum db_type db_type)
   case DB_TYPE_ARCHIVE_DB:
     return new ha_archive(table);
 #endif
+#ifdef HAVE_BLACKHOLE_DB
+  case DB_TYPE_BLACKHOLE_DB:
+    return new ha_blackhole(table);
+#endif
 #ifdef HAVE_CSV_DB
   case DB_TYPE_CSV_DB:
     return new ha_tina(table);
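With the engine compiled in, the new sys_table_types[] entry is what surfaces in the client's engine list; a hedged sketch of what that looks like (the exact column layout depends on the server version):

    SHOW STORAGE ENGINES;
    -- expected to include a row roughly like:
    --   BLACKHOLE | YES | Storage engine designed to act as null storage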
sql/handler.h

@@ -150,6 +150,7 @@ enum db_type
   DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB,
   DB_TYPE_GEMINI, DB_TYPE_NDBCLUSTER,
   DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB,
+  DB_TYPE_BLACKHOLE_DB,

   DB_TYPE_DEFAULT // Must be last
 };
sql/mysql_priv.h

@@ -967,6 +967,7 @@ extern SHOW_COMP_OPTION have_query_cache, have_berkeley_db, have_innodb;
 extern SHOW_COMP_OPTION have_geometry, have_rtree_keys;
 extern SHOW_COMP_OPTION have_crypt;
 extern SHOW_COMP_OPTION have_compress;
+extern SHOW_COMP_OPTION have_blackhole_db;

 #ifndef __WIN__
 extern pthread_t signal_thread;
sql/mysqld.cc

@@ -398,6 +398,7 @@ SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_ndbcluster,
 SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache;
 SHOW_COMP_OPTION have_geometry, have_rtree_keys;
 SHOW_COMP_OPTION have_crypt, have_compress;
+SHOW_COMP_OPTION have_blackhole_db;

 /* Thread specific variables */

@@ -5758,6 +5759,11 @@ static void mysql_init_variables(void)
 #else
   have_archive_db= SHOW_OPTION_NO;
 #endif
+#ifdef HAVE_BLACKHOLE_DB
+  have_blackhole_db= SHOW_OPTION_YES;
+#else
+  have_blackhole_db= SHOW_OPTION_NO;
+#endif
 #ifdef HAVE_CSV_DB
   have_csv_db= SHOW_OPTION_YES;
 #else
sql/set_var.cc

@@ -703,6 +703,7 @@ struct show_var_st init_vars[]= {
   {sys_group_concat_max_len.name, (char*) &sys_group_concat_max_len, SHOW_SYS},
   {"have_archive", (char*) &have_archive_db, SHOW_HAVE},
   {"have_bdb", (char*) &have_berkeley_db, SHOW_HAVE},
+  {"have_blackhole_engine", (char*) &have_blackhole_db, SHOW_HAVE},
   {"have_compress", (char*) &have_compress, SHOW_HAVE},
   {"have_crypt", (char*) &have_crypt, SHOW_HAVE},
   {"have_csv", (char*) &have_csv_db, SHOW_HAVE},
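The new init_vars[] entry is what the test include above keys on; a quick way to check a running server (expected output as in mysql-test/r/have_blackhole.require, NO on a build without --with-blackhole-storage-engine):

    SHOW VARIABLES LIKE 'have_blackhole_engine';
    -- Variable_name          Value
    -- have_blackhole_engine  YES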