Increase max packet length to 512M for mysql and mysqldump.
Faster 'read_first_row' (fixes the slow 'preparing' state). Read constant tables earlier, which provides better optimizations when using tables with <= 1 row. This also fixes a complicated bug involving const tables.
Docs/manual.texi
@@ -810,14 +810,14 @@ is pushed up to 8 million terabytes (2 ^ 63 bytes).
Note, however, that operating systems have their own file size
limits. Here are some examples:

@multitable @columnfractions .5 .5
@multitable @columnfractions .6 .4
@item @strong{Operating System} @tab @strong{File Size Limit}
@item Linux-Intel 32 bit @tab 2G, 4G or more, depends on Linux version
@item Linux-Alpha @tab 8T (?)
@item Solaris 2.5.1 @tab 2G (possible 4G with patch)
@item Solaris 2.6 @tab 4G
@item Solaris 2.7 Intel @tab 4G
@item Solaris 2.7 ULTRA-SPARC @tab 8T (?)
@item Solaris 2.7 ULTRA-SPARC @tab 512G
@end multitable

On Linux 2.2 you can get bigger tables than 2G by using the LFS patch for

@@ -5480,7 +5480,6 @@ pipes.
All of the above binaries are optimised for the Pentium Pro
processor but should work on any Intel processor >= i386.

In the following circumstances you will need to use the MySQL
configuration file:
@itemize @bullet

@@ -5498,17 +5497,23 @@ If you want to use one of the these servers:
If you need to tune the server settings.
@end itemize

There are two configuration files with the same function:
@file{my.cnf} and @file{my.ini} file, however please note that
only of one these should can used. Both files are plain text.
The @file{my.cnf} file should be created in the root directory
of drive C and the @file{my.ini} file on the WinDir directory
e.g: @file{C:\WINDOWS} or @file{C:\WINNT}.
If your PC uses a boot loader where the C drive isn't the boot
drive, then your only option is to use the @file{my.ini} file.
Also note that if you use the @code{WinMySQLAdmin} tool, only
the @file{my.ini} file is used. The @file{\mysql\bin} directory
contains a help file with instructions for using this tool.
Normally you can use the @code{WinMySQLAdmin} tool to edit the
configuration file @code{my.ini}. In this case you don't have to worry
about the following section.

There are two configuration files with the same function: @file{my.cnf}
and @file{my.ini} file, however please note that only of one these
should be used to avoid confusion. Both files are plain text. The
@file{my.cnf} file , if used, should be created in the root directory of
drive C and the @file{my.ini} file on the WinDir directory e.g:
@file{C:\WINDOWS} or @file{C:\WINNT}. MySQL will first read the
@code{my.ini} file, followed by the @code{my.cnf} file.

If your PC uses a boot loader where the C drive isn't the boot drive,
then your only option is to use the @file{my.ini} file. Also note that
if you use the @code{WinMySQLAdmin} tool, only the @file{my.ini} file is
used by this tool. The @file{\mysql\bin} directory contains a help file
with instructions for using this tool.

Using @code{notepad.exe}, create the configuration file and
edit the base section and keys:
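For illustration only (not part of this diff): a minimal option file of the kind described above, assuming MySQL is installed under C:\mysql; the keys and paths are examples, not taken from the commit:

    [mysqld]
    basedir = C:/mysql
    datadir = C:/mysql/data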
@@ -5781,11 +5786,12 @@ release or the last stable release:

@itemize @bullet
@item
Normally, if you are beginning to use MySQL for the first time
or trying to port it to some system for which there is no binary
distribution, we recommend going with the stable release (currently
Version 3.23. Note that all MySQL releases are checked with the
MySQL benchmarks and an extensive test suite before each release.
Normally, if you are beginning to use MySQL for the first time or trying
to port it to some system for which there is no binary distribution, we
recommend going with the stable release (currently Version 3.23. Note
that all MySQL releases are checked with the MySQL benchmarks and an
extensive test suite before each release (even the development
releases).

@item
Otherwise, if you are running an old system and want to upgrade, but

@@ -6046,7 +6052,8 @@ Stable tested releases are meant to appear about 1-2 times a year, but
if small bugs are found, a release with only bug fixes will be released.

@item
Working releases are meant to appear about every 1-8 weeks.
Working releases/bug fixes to old releases are meant to appear about
every 1-8 weeks.

@item
Binary distributions for some platforms will be made by us for major releases.

@@ -6055,12 +6062,12 @@ less frequently.

@item
We usually make patches available as soon as we have located and fixed
small bugs.
small bugs. They are posted to @email{bugs@@lists.mysql.com} and will
be added to the next release.

@item
For non-critical but annoying bugs, we will make patches available if they
are sent to us. Otherwise we will combine many of them into a larger
patch.
For non-critical but annoying bugs, we will add them the MySQL source
repository and they will be fixed in the next release.

@item
If there is, by any chance, a fatal bug in a release we will make a new

@@ -6075,6 +6082,10 @@ and things that ``must be done.'' ``Somewhat frozen'' means that we may
add small things that ``almost surely will not affect anything that's
already working.''

MySQL uses a little different naming scheme that most other products.
In general it's relatively safe to use any version that has been out for
a couple of weeks without being replaced with a new version.
@xref{Which version}.

@node MySQL binaries, Installing binary, Many versions, General Installation Issues
@subsection MySQL Binaries Compiled by MySQL AB

@@ -19487,7 +19498,7 @@ bytes when needed. This value by default is small, to catch big (possibly
wrong) packets. You must increase this value if you are using big
@code{BLOB} columns. It should be as big as the biggest @code{BLOB} you want
to use. The protocol limits for @code{max_allowed_packet} is 16M in MySQL
3.23 and 4G in MySQL 4.0.
3.23 and 2G in MySQL 4.0.

@item @code{max_binlog_cache_size}
If a multi-statement transaction requires more than this amount of memory,

@@ -36746,6 +36757,11 @@ The following are the known problems with @code{MERGE} tables:

@itemize @bullet
@item
@code{MERGE} table cannot maintain UNIQUE constraints over the whole
table. When you do INSERT, the data goes into the first or last table
(according to @code{INSERT_METHOD=xxx}) and this MyISAM table ensures
that the data are unique, but it knows nothing about the first MyISAM table.
@item
@code{DELETE FROM merge_table} used without a @code{WHERE}
will only clear the mapping for the table, not delete everything in the
mapped tables.
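For illustration only (not part of this diff): a short SQL sketch of the UNIQUE limitation described above; table and column names are invented, and the TYPE=MERGE / INSERT_METHOD syntax is the 3.23/4.0-era form:

    # Each MyISAM table enforces its own UNIQUE key independently.
    CREATE TABLE m1 (a INT NOT NULL, UNIQUE (a)) TYPE=MyISAM;
    CREATE TABLE m2 (a INT NOT NULL, UNIQUE (a)) TYPE=MyISAM;
    CREATE TABLE total (a INT NOT NULL, UNIQUE (a))
      TYPE=MERGE UNION=(m1,m2) INSERT_METHOD=LAST;
    INSERT INTO m1 VALUES (1);
    # Goes into m2 (INSERT_METHOD=LAST); m1's UNIQUE key is not checked,
    # so the MERGE table now returns the value 1 twice.
    INSERT INTO total VALUES (1);
    SELECT * FROM total;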
@@ -38643,6 +38659,7 @@ Finland
* BDB characteristics:: Characteristics of @code{BDB} tables:
* BDB TODO:: Things we need to fix for BDB in the near future:
* BDB portability:: Operating systems supported by @strong{BDB}
* BDB restrictions::
* BDB errors:: Errors That May Occur When Using BDB Tables
@end menu

@@ -38851,7 +38868,7 @@ Change to not use page locks at all when we are scanning tables.
@end itemize

@node BDB portability, BDB errors, BDB TODO, BDB
@node BDB portability, BDB restrictions, BDB TODO, BDB
@subsection Operating systems supported by @strong{BDB}

If you after having built MySQL with support for BDB tables get

@@ -38891,8 +38908,28 @@ Linux 2.x Alpha
Max OS X
@end itemize

@node BDB restrictions, BDB errors, BDB portability, BDB
@subsection Restrictions on BDB Tables

@node BDB errors, , BDB portability, BDB
Here follows the restrictions you have when using BDB tables:

@itemize @bullet
@item
BDB tables store in the .db file the path to the file as it was created
(My guess is that this is to be able to detect locks in a multi-user
environment that supports symlinks).

The effect of this is that BDB tables are not movable between directories!
@item
When taking backups of BDB tables, you have to either use
@code{mysqldump} or take a backup of all @code{table_name.db} files and
the BDB log files. The BDB log files are the files in the base data
directory named @code{log.XXXXXX} (6 digits);
The BDB table handler stores unfinished transactions in the log files
and requires these to be present when @code{mysqld} starts.
@end itemize

@node BDB errors, , BDB restrictions, BDB
@subsection Errors That May Occur When Using BDB Tables

@itemize @bullet

@@ -45281,6 +45318,11 @@ When a MySQL client or the @code{mysqld} server gets a packet bigger
than @code{max_allowed_packet} bytes, it issues a @code{Packet too large}
error and closes the connection.

In MySQL 3.23 the biggest possible packet is 16M (due to limits in the
client/server protocol). In MySQL 4.0.1 and up, this is only limited by
the amount on memory you have on your server (up to a theoretical
maximum of 2G).

A communication packet is a single SQL statement sent to the MySQL server
or a single row that is sent to the client.
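For illustration only (not part of this diff): one 3.23/4.0-era way to raise the limit on both the server and the dump client, using set-variable lines in an option file (the 16M value is just an example; the [mysqldump] group matches the load_default_groups list changed further down in this commit):

    [mysqld]
    set-variable = max_allowed_packet=16M

    [mysqldump]
    set-variable = max_allowed_packet=16M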
@@ -47090,11 +47132,6 @@ New version of netadmin. See above for details.
Atronic's MySQL client for Windows 2.0.3.0.
The home page for this can be found at @uref{http://www.artronic.hr/}.

@item @uref{http://www.mysql.com/Downloads/Win32/mysqlfront.zip}
MySQL Front, home page: @uref{http://www.mysqlfront.de/}.
Win32-Client for accessing and managing dbs, tables, table-data, indexes,
import-/export-files. (Freeware). By Ansgar Becker.

@item @uref{http://www.mysql.com/Downloads/Win32/W9xstop.zip}
Utility from Artronic to stop MySQL on win9x.

@@ -48135,6 +48172,14 @@ Our TODO section contains what we plan to have in 4.0. @xref{TODO MySQL 4.0}.

@itemize @bullet
@item
Fixed optimization problem where a MySQL was a long time in a
"preparing" state when selecting from an empty table which had contained
a lot of rows.
@item
Fixed bug in complicated join with @code{const} tables. This fix also
improves performance a bit when referring to another table from a
@code{const} table.
@item
Fixed bug in multi table delete.
@item
Fixed bug in @code{SELECT CONCAT(argument-list) ... GROUP BY 1}.
@@ -477,8 +477,8 @@ static struct option long_options[] =

CHANGEABLE_VAR changeable_vars[] = {
{ "connect_timeout", (long*) &opt_connect_timeout, 0, 0, 3600*12, 0, 1},
{ "max_allowed_packet", (long*) &max_allowed_packet,16*1024L*1024L,4096,
24*1024L*1024L, MALLOC_OVERHEAD,1024},
{ "net_buffer_length",(long*) &net_buffer_length,16384,1024,24*1024*1024L,
512*1024L*1024L, MALLOC_OVERHEAD,1024},
{ "net_buffer_length",(long*) &net_buffer_length,16384,1024,512*1024*1024L,
MALLOC_OVERHEAD,1024},
{ "select_limit", (long*) &select_limit, 1000L, 1, ~0L, 0, 1},
{ "max_join_size", (long*) &max_join_size, 1000000L, 1, ~0L, 0, 1},
@@ -147,9 +147,9 @@ static const char *load_default_groups[]= { "mysqldump","client",0 };

CHANGEABLE_VAR md_changeable_vars[] = {
{ "max_allowed_packet", (long*) &max_allowed_packet,24*1024*1024,4096,
24*1024L*1024L,MALLOC_OVERHEAD,1024},
512*1024L*1024L,MALLOC_OVERHEAD,1024},
{ "net_buffer_length", (long*) &net_buffer_length,1024*1024L-1025,4096,
24*1024L*1024L,MALLOC_OVERHEAD,1024},
512*1024L*1024L,MALLOC_OVERHEAD,1024},
{ 0, 0, 0, 0, 0, 0, 0}
};
@@ -1928,7 +1928,7 @@ static void dbug_flush(CODE_STATE *state)
{
if (!(freopen(stack->name,"a",_db_fp_)))
{
(void) fprintf(stderr, ERR_OPEN, _db_process_);
(void) fprintf(stderr, ERR_OPEN, _db_process_, stack->name);
fflush(stderr);
_db_fp_ = stdout;
stack -> out_file = _db_fp_;
@@ -577,3 +577,51 @@ handler t1 read t first;
--error 1109
handler t1 close;
drop table t1;

#
# Test of multi-table-delete
#

CREATE TABLE t1 (
number bigint(20) NOT NULL default '0',
cname char(15) NOT NULL default '',
carrier_id smallint(6) NOT NULL default '0',
privacy tinyint(4) NOT NULL default '0',
last_mod_date timestamp(14) NOT NULL,
last_mod_id smallint(6) NOT NULL default '0',
last_app_date timestamp(14) NOT NULL,
last_app_id smallint(6) default '-1',
version smallint(6) NOT NULL default '0',
assigned_scps int(11) default '0',
status tinyint(4) default '0'
) TYPE=InnoDB;
INSERT INTO t1 VALUES (4077711111,'SeanWheeler',90,2,20020111112846,500,00000000000000,-1,2,3,1);
INSERT INTO t1 VALUES (9197722223,'berry',90,3,20020111112809,500,20020102114532,501,4,10,0);
INSERT INTO t1 VALUES (650,'San Francisco',0,0,20011227111336,342,00000000000000,-1,1,24,1);
INSERT INTO t1 VALUES (302467,'Sue\'s Subshop',90,3,20020109113241,500,20020102115111,501,7,24,0);
INSERT INTO t1 VALUES (6014911113,'SudzCarwash',520,1,20020102115234,500,20020102115259,501,33,32768,0);
INSERT INTO t1 VALUES (333,'tubs',99,2,20020109113440,501,20020109113440,500,3,10,0);
CREATE TABLE t2 (
number bigint(20) NOT NULL default '0',
cname char(15) NOT NULL default '',
carrier_id smallint(6) NOT NULL default '0',
privacy tinyint(4) NOT NULL default '0',
last_mod_date timestamp(14) NOT NULL,
last_mod_id smallint(6) NOT NULL default '0',
last_app_date timestamp(14) NOT NULL,
last_app_id smallint(6) default '-1',
version smallint(6) NOT NULL default '0',
assigned_scps int(11) default '0',
status tinyint(4) default '0'
) TYPE=InnoDB;
INSERT INTO t2 VALUES (4077711111,'SeanWheeler',0,2,20020111112853,500,00000000000000,-1,2,3,1);
INSERT INTO t2 VALUES (9197722223,'berry',90,3,20020111112818,500,20020102114532,501,4,10,0);
INSERT INTO t2 VALUES (650,'San Francisco',90,0,20020109113158,342,00000000000000,-1,1,24,1);
INSERT INTO t2 VALUES (333,'tubs',99,2,20020109113453,501,20020109113453,500,3,10,0);
select * from t1;
select * from t2;
delete t1, t2 from t1 left join t2 on t1.number=t2.number where (t1.carrier_id=90 and t1.number=t2.number) or (t2.carrier_id=90 and t1.number=t2.number) or (t1.carrier_id=90 and t2.number is null);
select * from t1;
select * from t2;
select * from t2;
drop table t1,t2;
@@ -472,17 +472,36 @@ int handler::analyze(THD* thd, HA_CHECK_OPT* check_opt)
return HA_ADMIN_NOT_IMPLEMENTED;
}

/* Read first row from a table */
/*
Read first row (only) from a table
This is never called for InnoDB or BDB tables, as these table types
has the HA_NOT_EXACT_COUNT set.
*/

int handler::rnd_first(byte * buf)
int handler::read_first_row(byte * buf, uint primary_key)
{
register int error;
DBUG_ENTER("handler::rnd_first");
DBUG_ENTER("handler::read_first_row");

statistic_increment(ha_read_first_count,&LOCK_status);
(void) rnd_init();
while ((error= rnd_next(buf)) == HA_ERR_RECORD_DELETED) ;
(void) rnd_end();

/*
If there is very few deleted rows in the table, find the first row by
scanning the table.
*/
if (deleted < 10 || primary_key >= MAX_KEY)
{
(void) rnd_init();
while ((error= rnd_next(buf)) == HA_ERR_RECORD_DELETED) ;
(void) rnd_end();
}
else
{
/* Find the first row through the primary key */
(void) index_init(primary_key);
error=index_first(buf);
(void) index_end();
}
DBUG_RETURN(error);
}

@@ -498,7 +517,7 @@ int handler::restart_rnd_next(byte *buf, byte *pos)
}

/* Set a timestamp in record */
/* Set a timestamp in record */

void handler::update_timestamp(byte *record)
{
@@ -514,9 +533,10 @@ void handler::update_timestamp(byte *record)
return;
}

/* Updates field with field_type NEXT_NUMBER according to following:
** if field = 0 change field to the next free key in database.
*/
/*
Updates field with field_type NEXT_NUMBER according to following:
if field = 0 change field to the next free key in database.
*/

void handler::update_auto_increment()
{
@@ -269,7 +269,7 @@ public:
virtual int rnd_end() { return 0; }
virtual int rnd_next(byte *buf)=0;
virtual int rnd_pos(byte * buf, byte *pos)=0;
virtual int rnd_first(byte *buf);
virtual int read_first_row(byte *buf, uint primary_key);
virtual int restart_rnd_next(byte *buf, byte *pos);
virtual ha_rows records_in_range(int inx,
const byte *start_key,uint start_key_len,
@@ -585,6 +585,9 @@ int SQL_SELECT::test_quick_select(key_map keys_to_use, table_map prev_tables,
uint idx;
double scan_time;
DBUG_ENTER("test_quick_select");
DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu",
(ulong) keys_to_use, (ulong) prev_tables,
(ulong) const_tables));

delete quick;
quick=0;
@@ -41,6 +41,8 @@ static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,
uint tables,COND *conds,table_map table_map);
static int sort_keyuse(KEYUSE *a,KEYUSE *b);
static void set_position(JOIN *join,uint index,JOIN_TAB *table,KEYUSE *key);
static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
table_map used_tables);
static void find_best_combination(JOIN *join,table_map rest_tables);
static void find_best(JOIN *join,table_map rest_tables,uint index,
double record_count,double read_time);
@@ -84,7 +86,7 @@ static int end_unique_update(JOIN *join,JOIN_TAB *join_tab,
static int end_write_group(JOIN *join, JOIN_TAB *join_tab,
bool end_of_records);
static int test_if_group_changed(List<Item_buff> &list);
static int join_read_const_tables(JOIN *join);
static int join_read_const_table(JOIN_TAB *tab, POSITION *pos);
static int join_read_system(JOIN_TAB *tab);
static int join_read_const(JOIN_TAB *tab);
static int join_read_key(JOIN_TAB *tab);
@@ -413,9 +415,8 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
goto err;
thd->proc_info="preparing";
result->initialize_tables(&join);
if ((tmp=join_read_const_tables(&join)) > 0)
goto err;
if (tmp && !(select_options & SELECT_DESCRIBE))
if (join.const_table_map != join.found_const_table_map &&
!(select_options & SELECT_DESCRIBE))
{
error=return_zero_rows(result,tables,fields,
join.tmp_table_param.sum_func_count != 0 &&
@@ -903,7 +904,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
{
int error;
uint i,table_count,const_count,found_ref,refs,key,const_ref,eq_part;
table_map const_table_map,all_table_map;
table_map const_table_map,found_const_table_map,all_table_map;
TABLE **table_vector;
JOIN_TAB *stat,*stat_end,*s,**stat_ref;
SQL_SELECT *select;
@@ -923,7 +924,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
join->best_ref=stat_vector;

stat_end=stat+table_count;
const_table_map=all_table_map=0;
const_table_map=found_const_table_map=all_table_map=0;
const_count=0;

for (s=stat,i=0 ; tables ; s++,tables=tables->next,i++)
@@ -938,13 +939,13 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
bzero((char*) table->const_key_parts, sizeof(key_part_map)*table->keys);
all_table_map|= table->map;
s->join=join;
s->info=0; // For describe
if ((s->on_expr=tables->on_expr))
{
/* Left join */
if (!table->file->records)
{ // Empty table
s->key_dependent=s->dependent=0;
s->type=JT_SYSTEM;
const_table_map|=table->map;
s->key_dependent=s->dependent=0; // Ignore LEFT JOIN depend.
set_position(join,const_count++,s,(KEYUSE*) 0);
continue;
}
@@ -965,8 +966,6 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
if ((table->system || table->file->records <= 1) && ! s->dependent &&
!(table->file->option_flag() & HA_NOT_EXACT_COUNT))
{
s->type=JT_SYSTEM;
const_table_map|=table->map;
set_position(join,const_count++,s,(KEYUSE*) 0);
}
}
@@ -974,10 +973,10 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
join->outer_join=outer_join;

/*
** If outer join: Re-arrange tables in stat_vector so that outer join
** tables are after all tables it is dependent of.
** For example: SELECT * from A LEFT JOIN B ON B.c=C.c, C WHERE A.C=C.C
** Will shift table B after table C.
If outer join: Re-arrange tables in stat_vector so that outer join
tables are after all tables it is dependent of.
For example: SELECT * from A LEFT JOIN B ON B.c=C.c, C WHERE A.C=C.C
Will shift table B after table C.
*/
if (outer_join)
{
@@ -1014,31 +1013,66 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
conds,~outer_join))
DBUG_RETURN(1);

/* Read tables with 0 or 1 rows (system tables) */
join->const_table_map=const_table_map;

for (POSITION *p_pos=join->positions, *p_end=p_pos+const_count;
p_pos < p_end ;
p_pos++)
{
int tmp;
s= p_pos->table;
s->type=JT_SYSTEM;
join->const_table_map|=s->table->map;
if ((tmp=join_read_const_table(s, p_pos)))
{
if (tmp > 0)
DBUG_RETURN(1); // Fatal error
}
else
found_const_table_map|= s->table->map;
}

/* loop until no more const tables are found */
int ref_changed;
do
{
ref_changed = 0;
found_ref=0;
for (JOIN_TAB **pos=stat_vector+const_count; (s= *pos) ; pos++)

/*
We only have to loop from stat_vector + const_count as
set_position() will move all const_tables first in stat_vector
*/

for (JOIN_TAB **pos=stat_vector+const_count ; (s= *pos) ; pos++)
{
TABLE *table=s->table;
if (s->dependent) // If dependent on some table
{
if (s->dependent & ~(const_table_map)) // All dep. must be constants
// All dep. must be constants
if (s->dependent & ~(join->const_table_map))
continue;
if (s->table->file->records <= 1L &&
!(s->table->file->option_flag() & HA_NOT_EXACT_COUNT))
if (table->file->records <= 1L &&
!(table->file->option_flag() & HA_NOT_EXACT_COUNT))
{ // system table
int tmp;
s->type=JT_SYSTEM;
const_table_map|=s->table->map;
join->const_table_map|=table->map;
set_position(join,const_count++,s,(KEYUSE*) 0);
if ((tmp=join_read_const_table(s,join->positions+const_count-1)))
{
if (tmp > 0)
DBUG_RETURN(1); // Fatal error
}
else
found_const_table_map|= table->map;
continue;
}
}
/* check if table can be read by key or table only uses const refs */
if ((keyuse=s->keyuse))
{
TABLE *table=s->table;
s->type= JT_REF;
while (keyuse->table == table)
{
@@ -1051,7 +1085,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
{
if (keyuse->val->type() != Item::NULL_ITEM)
{
if (!((~const_table_map) & keyuse->used_tables))
if (!((~join->const_table_map) & keyuse->used_tables))
const_ref|= (key_map) 1 << keyuse->keypart;
else
refs|=keyuse->used_tables;
@@ -1065,10 +1099,22 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
{
if (const_ref == eq_part)
{ // Found everything for ref.
s->type=JT_CONST;
const_table_map|=table->map;
set_position(join,const_count++,s,start_keyuse);
int tmp;
ref_changed = 1;
s->type=JT_CONST;
join->const_table_map|=table->map;
set_position(join,const_count++,s,start_keyuse);
if (create_ref_for_key(join, s, start_keyuse,
join->const_table_map))
DBUG_RETURN(1);
if ((tmp=join_read_const_table(s,
join->positions+const_count-1)))
{
if (tmp > 0)
DBUG_RETURN(1); // Fatal error
}
else
found_const_table_map|= table->map;
break;
}
else
@@ -1077,7 +1123,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
}
}
}
} while (const_table_map & found_ref && ref_changed);
} while (join->const_table_map & found_ref && ref_changed);

/* Calc how many (possible) matched records in each table */

@@ -1093,8 +1139,10 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
s->found_records=s->records=s->table->file->records;
s->read_time=(ha_rows) s->table->file->scan_time();

/* Set a max range of how many seeks we can expect when using keys */
/* This was (s->read_time*5), but this was too low with small rows */
/*
Set a max range of how many seeks we can expect when using keys
This was (s->read_time*5), but this was too low with small rows
*/
s->worst_seeks= (double) s->found_records / 5;
if (s->worst_seeks < 2.0) // Fix for small tables
s->worst_seeks=2.0;
@@ -1105,15 +1153,14 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
{
ha_rows records;
if (!select)
select=make_select(s->table,const_table_map,
0,
select=make_select(s->table, join->const_table_map,
join->const_table_map,
and_conds(conds,s->on_expr),&error);
records=get_quick_record_count(select,s->table, s->const_keys,
join->row_limit);
s->quick=select->quick;
s->needed_reg=select->needed_reg;
select->quick=0;
select->read_tables=const_table_map;
if (records != HA_POS_ERROR)
{
s->found_records=records;
@@ -1128,10 +1175,10 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds,
join->map2table=stat_ref;
join->table= join->all_tables=table_vector;
join->const_tables=const_count;
join->const_table_map=const_table_map;
join->found_const_table_map=found_const_table_map;

if (join->const_tables != join->tables)
find_best_combination(join,all_table_map & ~const_table_map);
find_best_combination(join,all_table_map & ~join->const_table_map);
else
{
memcpy((gptr) join->best_positions,(gptr) join->positions,
|
||||
static bool
|
||||
get_best_combination(JOIN *join)
|
||||
{
|
||||
uint i,key,tablenr;
|
||||
uint i,tablenr;
|
||||
table_map used_tables;
|
||||
TABLE *table;
|
||||
JOIN_TAB *join_tab,*j;
|
||||
KEYUSE *keyuse;
|
||||
KEY *keyinfo;
|
||||
uint table_count;
|
||||
THD *thd=join->thd;
|
||||
|
||||
@ -2054,8 +2099,6 @@ get_best_combination(JOIN *join)
|
||||
(JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB)*table_count)))
|
||||
return TRUE;
|
||||
|
||||
join->const_tables=0; /* for checking */
|
||||
join->const_table_map=0;
|
||||
join->full_join=0;
|
||||
|
||||
used_tables=0;
|
||||
@ -2064,160 +2107,26 @@ get_best_combination(JOIN *join)
|
||||
TABLE *form;
|
||||
*j= *join->best_positions[tablenr].table;
|
||||
form=join->table[tablenr]=j->table;
|
||||
j->ref.key = -1;
|
||||
j->ref.key_parts=0;
|
||||
j->info=0; // For describe
|
||||
used_tables|= form->map;
|
||||
form->reginfo.join_tab=j;
|
||||
if (!j->on_expr)
|
||||
form->reginfo.not_exists_optimize=0; // Only with LEFT JOIN
|
||||
if (j->type == JT_CONST)
|
||||
continue; // Handled in make_join_stat..
|
||||
|
||||
j->ref.key = -1;
|
||||
j->ref.key_parts=0;
|
||||
|
||||
if (j->type == JT_SYSTEM)
|
||||
{
|
||||
j->table->const_table=1;
|
||||
if (join->const_tables == tablenr)
|
||||
{
|
||||
join->const_tables++;
|
||||
join->const_table_map|=form->map;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (!j->keys || !(keyuse= join->best_positions[tablenr].key))
|
||||
{
|
||||
j->type=JT_ALL;
|
||||
if (tablenr != join->const_tables)
|
||||
join->full_join=1;
|
||||
}
|
||||
else
|
||||
{
|
||||
uint keyparts,length;
|
||||
bool ftkey=(keyuse->keypart == FT_KEYPART);
|
||||
/*
|
||||
** Use best key from find_best
|
||||
*/
|
||||
table=j->table;
|
||||
key=keyuse->key;
|
||||
|
||||
keyinfo=table->key_info+key;
|
||||
if (ftkey)
|
||||
{
|
||||
Item_func_match *ifm=(Item_func_match *)keyuse->val;
|
||||
|
||||
length=0;
|
||||
keyparts=1;
|
||||
ifm->join_key=1;
|
||||
}
|
||||
else
|
||||
{
|
||||
keyparts=length=0;
|
||||
do
|
||||
{
|
||||
if (!((~used_tables) & keyuse->used_tables))
|
||||
{
|
||||
if (keyparts == keyuse->keypart)
|
||||
{
|
||||
keyparts++;
|
||||
length+=keyinfo->key_part[keyuse->keypart].store_length;
|
||||
}
|
||||
}
|
||||
keyuse++;
|
||||
} while (keyuse->table == table && keyuse->key == key);
|
||||
} /* not ftkey */
|
||||
|
||||
/* set up fieldref */
|
||||
keyinfo=table->key_info+key;
|
||||
j->ref.key_parts=keyparts;
|
||||
j->ref.key_length=length;
|
||||
j->ref.key=(int) key;
|
||||
if (!(j->ref.key_buff= (byte*) thd->calloc(ALIGN_SIZE(length)*2)) ||
|
||||
!(j->ref.key_copy= (store_key**) thd->alloc((sizeof(store_key*) *
|
||||
(keyparts+1)))) ||
|
||||
!(j->ref.items= (Item**) thd->alloc(sizeof(Item*)*keyparts)))
|
||||
{
|
||||
return TRUE;
|
||||
}
|
||||
j->ref.key_buff2=j->ref.key_buff+ALIGN_SIZE(length);
|
||||
j->ref.key_err=1;
|
||||
keyuse=join->best_positions[tablenr].key;
|
||||
|
||||
store_key **ref_key=j->ref.key_copy;
|
||||
byte *key_buff=j->ref.key_buff;
|
||||
if (ftkey)
|
||||
{
|
||||
j->ref.items[0]=((Item_func*)(keyuse->val))->key_item();
|
||||
if (keyuse->used_tables)
|
||||
return TRUE; // not supported yet. SerG
|
||||
|
||||
j->type=JT_FT;
|
||||
}
|
||||
else
|
||||
{
|
||||
for (i=0 ; i < keyparts ; keyuse++,i++)
|
||||
{
|
||||
while (keyuse->keypart != i ||
|
||||
((~used_tables) & keyuse->used_tables))
|
||||
keyuse++; /* Skip other parts */
|
||||
|
||||
uint maybe_null= test(keyinfo->key_part[i].null_bit);
|
||||
j->ref.items[i]=keyuse->val; // Save for cond removal
|
||||
if (!keyuse->used_tables &&
|
||||
!(join->select_options & SELECT_DESCRIBE))
|
||||
{ // Compare against constant
|
||||
store_key_item *tmp=new store_key_item(thd,
|
||||
keyinfo->key_part[i].field,
|
||||
(char*)key_buff +
|
||||
maybe_null,
|
||||
maybe_null ?
|
||||
(char*) key_buff : 0,
|
||||
keyinfo->key_part[i].length,
|
||||
keyuse->val);
|
||||
if (thd->fatal_error)
|
||||
{
|
||||
return TRUE;
|
||||
}
|
||||
tmp->copy();
|
||||
}
|
||||
else
|
||||
*ref_key++= get_store_key(join->thd,
|
||||
keyuse,join->const_table_map,
|
||||
&keyinfo->key_part[i],
|
||||
(char*) key_buff,maybe_null);
|
||||
key_buff+=keyinfo->key_part[i].store_length;
|
||||
}
|
||||
} /* not ftkey */
|
||||
*ref_key=0; // end_marker
|
||||
if (j->type == JT_FT) /* no-op */;
|
||||
else if (j->type == JT_CONST)
|
||||
{
|
||||
j->table->const_table=1;
|
||||
if (join->const_tables == tablenr)
|
||||
{
|
||||
join->const_tables++;
|
||||
join->const_table_map|=form->map;
|
||||
}
|
||||
}
|
||||
else if (((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY)) != HA_NOSAME) ||
|
||||
keyparts != keyinfo->key_parts)
|
||||
j->type=JT_REF; /* Must read with repeat */
|
||||
else if (ref_key == j->ref.key_copy)
|
||||
{ /* Should never be reached */
|
||||
/*
|
||||
This happen if we are using a constant expression in the ON part
|
||||
of an LEFT JOIN.
|
||||
SELECT * FROM a LEFT JOIN b ON b.key=30
|
||||
Here we should not mark the table as a 'const' as a field may
|
||||
have a 'normal' value or a NULL value.
|
||||
*/
|
||||
j->type=JT_CONST;
|
||||
if (join->const_tables == tablenr)
|
||||
{
|
||||
join->const_tables++;
|
||||
join->const_table_map|=form->map;
|
||||
}
|
||||
}
|
||||
else
|
||||
j->type=JT_EQ_REF;
|
||||
}
|
||||
else if (create_ref_for_key(join, j, keyuse, used_tables))
|
||||
return TRUE; // Something went wrong
|
||||
}
|
||||
|
||||
for (i=0 ; i < table_count ; i++)
|
||||
@@ -2227,6 +2136,136 @@ get_best_combination(JOIN *join)
}

static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
table_map used_tables)
{
KEYUSE *keyuse=org_keyuse;
bool ftkey=(keyuse->keypart == FT_KEYPART);
THD *thd= join->thd;
uint keyparts,length,key;
TABLE *table;
KEY *keyinfo;

/*
** Use best key from find_best
*/
table=j->table;
key=keyuse->key;
keyinfo=table->key_info+key;

if (ftkey)
{
Item_func_match *ifm=(Item_func_match *)keyuse->val;

length=0;
keyparts=1;
ifm->join_key=1;
}
else
{
keyparts=length=0;
do
{
if (!((~used_tables) & keyuse->used_tables))
{
if (keyparts == keyuse->keypart)
{
keyparts++;
length+=keyinfo->key_part[keyuse->keypart].store_length;
}
}
keyuse++;
} while (keyuse->table == table && keyuse->key == key);
} /* not ftkey */

/* set up fieldref */
keyinfo=table->key_info+key;
j->ref.key_parts=keyparts;
j->ref.key_length=length;
j->ref.key=(int) key;
if (!(j->ref.key_buff= (byte*) thd->calloc(ALIGN_SIZE(length)*2)) ||
!(j->ref.key_copy= (store_key**) thd->alloc((sizeof(store_key*) *
(keyparts+1)))) ||
!(j->ref.items= (Item**) thd->alloc(sizeof(Item*)*keyparts)))
{
return TRUE;
}
j->ref.key_buff2=j->ref.key_buff+ALIGN_SIZE(length);
j->ref.key_err=1;
keyuse=org_keyuse;

store_key **ref_key=j->ref.key_copy;
byte *key_buff=j->ref.key_buff;
if (ftkey)
{
j->ref.items[0]=((Item_func*)(keyuse->val))->key_item();
if (keyuse->used_tables)
return TRUE; // not supported yet. SerG

j->type=JT_FT;
}
else
{
uint i;
for (i=0 ; i < keyparts ; keyuse++,i++)
{
while (keyuse->keypart != i ||
((~used_tables) & keyuse->used_tables))
keyuse++; /* Skip other parts */

uint maybe_null= test(keyinfo->key_part[i].null_bit);
j->ref.items[i]=keyuse->val; // Save for cond removal
if (!keyuse->used_tables &&
!(join->select_options & SELECT_DESCRIBE))
{ // Compare against constant
store_key_item *tmp=new store_key_item(thd,
keyinfo->key_part[i].field,
(char*)key_buff +
maybe_null,
maybe_null ?
(char*) key_buff : 0,
keyinfo->key_part[i].length,
keyuse->val);
if (thd->fatal_error)
{
return TRUE;
}
tmp->copy();
}
else
*ref_key++= get_store_key(thd,
keyuse,join->const_table_map,
&keyinfo->key_part[i],
(char*) key_buff,maybe_null);
key_buff+=keyinfo->key_part[i].store_length;
}
} /* not ftkey */
*ref_key=0; // end_marker
if (j->type == JT_FT) /* no-op */;
else if (j->type == JT_CONST)
j->table->const_table=1;
else if (((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY))
!= HA_NOSAME) ||
keyparts != keyinfo->key_parts)
j->type=JT_REF; /* Must read with repeat */
else if (ref_key == j->ref.key_copy)
{ /* Should never be reached */
/*
This happen if we are using a constant expression in the ON part
of an LEFT JOIN.
SELECT * FROM a LEFT JOIN b ON b.key=30
Here we should not mark the table as a 'const' as a field may
have a 'normal' value or a NULL value.
*/
j->type=JT_CONST;
}
else
j->type=JT_EQ_REF;
return 0;
}

static store_key *
get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables,
KEY_PART_INFO *key_part, char *key_buff, uint maybe_null)
|
||||
((Item_func*) cond)->functype() == Item_func::ISNULL_FUNC)
|
||||
{
|
||||
/*
|
||||
** Handles this special case for some ODBC applications:
|
||||
** The are requesting the row that was just updated with a auto_increment
|
||||
** value with this construct:
|
||||
**
|
||||
** SELECT * from table_name where auto_increment_column IS NULL
|
||||
** This will be changed to:
|
||||
** SELECT * from table_name where auto_increment_column = LAST_INSERT_ID
|
||||
Handles this special case for some ODBC applications:
|
||||
The are requesting the row that was just updated with a auto_increment
|
||||
value with this construct:
|
||||
|
||||
SELECT * from table_name where auto_increment_column IS NULL
|
||||
This will be changed to:
|
||||
SELECT * from table_name where auto_increment_column = LAST_INSERT_ID
|
||||
*/
|
||||
|
||||
Item_func_isnull *func=(Item_func_isnull*) cond;
|
||||
@@ -4365,45 +4404,44 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skipp_last)
*****************************************************************************/

static int
join_read_const_tables(JOIN *join)
join_read_const_table(JOIN_TAB *tab, POSITION *pos)
{
uint i;
int error;
DBUG_ENTER("join_read_const_tables");
for (i=0 ; i < join->const_tables ; i++)
{
TABLE *form=join->table[i];
form->null_row=0;
form->status=STATUS_NO_RECORD;
DBUG_ENTER("join_read_const_table");
TABLE *table=tab->table;
table->const_table=1;
table->null_row=0;
table->status=STATUS_NO_RECORD;

if (join->join_tab[i].type == JT_SYSTEM)
{
if ((error=join_read_system(join->join_tab+i)))
{ // Info for DESCRIBE
join->join_tab[i].info="const row not found";
join->best_positions[i].records_read=0.0;
if (!form->outer_join || error > 0)
DBUG_RETURN(error);
}
if (tab->type == JT_SYSTEM)
{
if ((error=join_read_system(tab)))
{ // Info for DESCRIBE
tab->info="const row not found";
/* Mark for EXPLAIN that the row was not found */
pos->records_read=0.0;
if (!table->outer_join || error > 0)
DBUG_RETURN(error);
}
else
{
if ((error=join_read_const(join->join_tab+i)))
{
join->join_tab[i].info="unique row not found";
join->best_positions[i].records_read=0.0;
if (!form->outer_join || error > 0)
DBUG_RETURN(error);
}
}
if (join->join_tab[i].on_expr && !form->null_row)
{
if ((form->null_row= test(join->join_tab[i].on_expr->val_int() == 0)))
empty_record(form);
}
if (!form->null_row)
form->maybe_null=0;
}
else
{
if ((error=join_read_const(tab)))
{
tab->info="unique row not found";
/* Mark for EXPLAIN that the row was not found */
pos->records_read=0.0;
if (!table->outer_join || error > 0)
DBUG_RETURN(error);
}
}
if (tab->on_expr && !table->null_row)
{
if ((table->null_row= test(tab->on_expr->val_int() == 0)))
empty_record(table);
}
if (!table->null_row)
table->maybe_null=0;
DBUG_RETURN(0);
}

@@ -4415,7 +4453,8 @@ join_read_system(JOIN_TAB *tab)
int error;
if (table->status & STATUS_GARBAGE) // If first read
{
if ((error=table->file->rnd_first(table->record[0])))
if ((error=table->file->read_first_row(table->record[0],
table->primary_key)))
{
if (error != HA_ERR_END_OF_FILE)
{
@@ -158,7 +158,7 @@ class JOIN {
uint send_group_parts;
bool sort_and_group,first_record,full_join,group, no_field_update;
bool do_send_rows;
table_map const_table_map,outer_join;
table_map const_table_map,found_const_table_map,outer_join;
ha_rows send_records,found_records,examined_rows,row_limit;
POSITION positions[MAX_TABLES+1],best_positions[MAX_TABLES+1];
double best_read;