1
0
mirror of https://github.com/MariaDB/server.git synced 2025-11-08 00:28:29 +03:00

Fixed typos

- Removed duplicate words, like "the the" and "to to"
- Removed duplicate lines (one double sort line found in mysql.cc)
- Fixed some typos found while searching for duplicate words.

Command used to find duplicate words:
egrep -rI "\s([a-zA-Z]+)\s+\1\s" | grep -v param

Thanks to Artjoms Rimdjonoks for the command and pointing out the
spelling errors.
This commit is contained in:
Monty
2025-08-31 17:04:19 +03:00
parent 25077539d7
commit 882f6fa3aa
86 changed files with 169 additions and 172 deletions

View File

@@ -1495,9 +1495,8 @@ The following specify which files/extra groups are read (specified before remain
--thread-pool-priority=name --thread-pool-priority=name
Threadpool priority. High priority connections usually Threadpool priority. High priority connections usually
start executing earlier than low priority.If priority set start executing earlier than low priority.If priority set
to 'auto', the the actual priority(low or high) is to 'auto', the actual priority(low or high) is determined
determined based on whether or not connection is inside based on whether or not connection is inside transaction.
transaction.
--thread-pool-size=# --thread-pool-size=#
Number of thread groups in the pool. This parameter is Number of thread groups in the pool. This parameter is
roughly equivalent to maximum number of concurrently roughly equivalent to maximum number of concurrently

View File

@@ -1015,7 +1015,7 @@ COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME FOREIGN_KEY_CHECKS VARIABLE_NAME FOREIGN_KEY_CHECKS
VARIABLE_SCOPE SESSION VARIABLE_SCOPE SESSION
VARIABLE_TYPE BOOLEAN VARIABLE_TYPE BOOLEAN
VARIABLE_COMMENT If set to 1 (the default) foreign key constraints (including ON UPDATE and ON DELETE behavior) InnoDB tables are checked, while if set to 0, they are not checked. 0 is not recommended for normal use, though it can be useful in situations where you know the data is consistent, but want to reload data in a different order from that that specified by parent/child relationships. Setting this variable to 1 does not retrospectively check for inconsistencies introduced while set to 0. VARIABLE_COMMENT If set to 1 (the default) foreign key constraints (including ON UPDATE and ON DELETE behavior) InnoDB tables are checked, while if set to 0, they are not checked. 0 is not recommended for normal use, though it can be useful in situations where you know the data is consistent, but want to reload data in a different order from that specified by parent/child relationships. Setting this variable to 1 does not retrospectively check for inconsistencies introduced while set to 0.
NUMERIC_MIN_VALUE NULL NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL NUMERIC_BLOCK_SIZE NULL

View File

@@ -1045,7 +1045,7 @@ COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME FOREIGN_KEY_CHECKS VARIABLE_NAME FOREIGN_KEY_CHECKS
VARIABLE_SCOPE SESSION VARIABLE_SCOPE SESSION
VARIABLE_TYPE BOOLEAN VARIABLE_TYPE BOOLEAN
VARIABLE_COMMENT If set to 1 (the default) foreign key constraints (including ON UPDATE and ON DELETE behavior) InnoDB tables are checked, while if set to 0, they are not checked. 0 is not recommended for normal use, though it can be useful in situations where you know the data is consistent, but want to reload data in a different order from that that specified by parent/child relationships. Setting this variable to 1 does not retrospectively check for inconsistencies introduced while set to 0. VARIABLE_COMMENT If set to 1 (the default) foreign key constraints (including ON UPDATE and ON DELETE behavior) InnoDB tables are checked, while if set to 0, they are not checked. 0 is not recommended for normal use, though it can be useful in situations where you know the data is consistent, but want to reload data in a different order from that specified by parent/child relationships. Setting this variable to 1 does not retrospectively check for inconsistencies introduced while set to 0.
NUMERIC_MIN_VALUE NULL NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL NUMERIC_BLOCK_SIZE NULL
@@ -4505,7 +4505,7 @@ COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME THREAD_POOL_PRIORITY VARIABLE_NAME THREAD_POOL_PRIORITY
VARIABLE_SCOPE SESSION VARIABLE_SCOPE SESSION
VARIABLE_TYPE ENUM VARIABLE_TYPE ENUM
VARIABLE_COMMENT Threadpool priority. High priority connections usually start executing earlier than low priority.If priority set to 'auto', the the actual priority(low or high) is determined based on whether or not connection is inside transaction. VARIABLE_COMMENT Threadpool priority. High priority connections usually start executing earlier than low priority.If priority set to 'auto', the actual priority(low or high) is determined based on whether or not connection is inside transaction.
NUMERIC_MIN_VALUE NULL NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL NUMERIC_BLOCK_SIZE NULL

View File

@@ -2029,7 +2029,7 @@ static int ddl_log_execute_action(THD *thd, MEM_ROOT *mem_root,
{ {
LEX_CUSTRING version= {ddl_log_entry->uuid, MY_UUID_SIZE}; LEX_CUSTRING version= {ddl_log_entry->uuid, MY_UUID_SIZE};
/* /*
Temporary .frm file exists. This means that that the table in Temporary .frm file exists. This means that the table in
the storage engine can be of either old or new version. the storage engine can be of either old or new version.
If old version, delete the new .frm table and keep the old one. If old version, delete the new .frm table and keep the old one.
If new version, replace the old .frm with the new one. If new version, replace the old .frm with the new one.

View File

@@ -74,7 +74,7 @@ struct st_debug_sync_control
/** /**
Definitions for the debug sync facility. Definitions for the debug sync facility.
1. Global string variable to hold a set of of "signals". 1. Global string variable to hold a set of "signals".
2. Global condition variable for signaling and waiting. 2. Global condition variable for signaling and waiting.
3. Global mutex to synchronize access to the above. 3. Global mutex to synchronize access to the above.
*/ */
@@ -1009,7 +1009,7 @@ static bool debug_sync_set_action(THD *thd, st_debug_sync_action *action)
If the terminator of the token is ASCII NUL ('\0'), it returns a If the terminator of the token is ASCII NUL ('\0'), it returns a
pointer to the terminator (string end). pointer to the terminator (string end).
If the terminator is a space character, it replaces the the first If the terminator is a space character, it replaces the first
byte of the terminator character by ASCII NUL ('\0'), skips the (now byte of the terminator character by ASCII NUL ('\0'), skips the (now
corrupted) terminator character, and skips all following space corrupted) terminator character, and skips all following space
characters. It returns a pointer to the next non-space character or characters. It returns a pointer to the next non-space character or

View File

@@ -167,7 +167,7 @@ deinit_event_thread(THD *thd)
thd The THD of the thread. Has to be allocated by the caller. thd The THD of the thread. Has to be allocated by the caller.
NOTES NOTES
1. The host of the thead is my_localhost 1. The host of the thread is my_localhost
2. thd->net is initted with NULL - no communication. 2. thd->net is initted with NULL - no communication.
*/ */

View File

@@ -1143,7 +1143,7 @@ Field_longstr::pack_sort_string(uchar *to, const SORT_FIELD_ATTR *sort_field)
@details @details
The function returns a double number between 0.0 and 1.0 as the relative The function returns a double number between 0.0 and 1.0 as the relative
position of the value of the this field in the numeric interval of [min,max]. position of the value of the this field in the numeric interval of [min,max].
If the value is not in the interval the the function returns 0.0 when If the value is not in the interval the function returns 0.0 when
the value is less than min, and, 1.0 when the value is greater than max. the value is less than min, and, 1.0 when the value is greater than max.
@param min value of the left end of the interval @param min value of the left end of the interval
@@ -1204,7 +1204,7 @@ static inline double safe_substract(ulonglong a, ulonglong b)
@details @details
The function returns a double number between 0.0 and 1.0 as the relative The function returns a double number between 0.0 and 1.0 as the relative
position of the value of the this field in the string interval of [min,max]. position of the value of the this field in the string interval of [min,max].
If the value is not in the interval the the function returns 0.0 when If the value is not in the interval the function returns 0.0 when
the value is less than min, and, 1.0 when the value is greater than max. the value is less than min, and, 1.0 when the value is greater than max.
@note @note

View File

@@ -2420,7 +2420,7 @@ get_addon_fields(TABLE *table, uint sortlength,
/* /*
If there is a reference to a field in the query add it If there is a reference to a field in the query add it
to the the set of appended fields. to the set of appended fields.
Note for future refinement: Note for future refinement:
This this a too strong condition. This this a too strong condition.
Actually we need only the fields referred in the Actually we need only the fields referred in the

View File

@@ -149,7 +149,7 @@ partition_notify_tabledef_changed(handlerton *,
/* /*
If frm_error() is called then we will use this to to find out what file If frm_error() is called then we will use this to find out what file
extensions exist for the storage engine. This is also used by the default extensions exist for the storage engine. This is also used by the default
rename_table and delete_table method in handler.cc. rename_table and delete_table method in handler.cc.
*/ */
@@ -9791,7 +9791,7 @@ ha_rows ha_partition::min_rows_for_estimate()
All partitions might have been left as unused during partition pruning All partitions might have been left as unused during partition pruning
due to, for example, an impossible WHERE condition. Nonetheless, the due to, for example, an impossible WHERE condition. Nonetheless, the
optimizer might still attempt to perform (e.g. range) analysis where an optimizer might still attempt to perform (e.g. range) analysis where an
estimate of the the number of rows is calculated using records_in_range. estimate of the number of rows is calculated using records_in_range.
Hence, to handle this and other possible cases, use zero as the minimum Hence, to handle this and other possible cases, use zero as the minimum
number of rows to base the estimate on if no partition is being used. number of rows to base the estimate on if no partition is being used.
*/ */

View File

@@ -271,7 +271,7 @@ typedef struct st_partition_part_key_multi_range_hld
/* Owner object */ /* Owner object */
ha_partition *partition; ha_partition *partition;
/* id of the the partition this structure is for */ /* id of the partition this structure is for */
uint32 part_id; uint32 part_id;
/* Current range we're iterating through */ /* Current range we're iterating through */

View File

@@ -64,7 +64,7 @@ struct Listener
} }
/** /**
if not NULL, this handle can be be used in WaitForSingle/MultipleObject(s). if not NULL, this handle can be used in WaitForSingle/MultipleObject(s).
This handle will be closed when object is destroyed. This handle will be closed when object is destroyed.
If NULL, the completion notification happens in threadpool. If NULL, the completion notification happens in threadpool.

View File

@@ -4509,7 +4509,7 @@ void handler::print_error(int error, myf errflag)
case HA_ERR_RECORD_DELETED: case HA_ERR_RECORD_DELETED:
case HA_ERR_END_OF_FILE: case HA_ERR_END_OF_FILE:
/* /*
This errors is not not normally fatal (for example for reads). However This errors is not normally fatal (for example for reads). However
if you get it during an update or delete, then its fatal. if you get it during an update or delete, then its fatal.
As the user is calling print_error() (which is not done on read), we As the user is calling print_error() (which is not done on read), we
assume something when wrong with the update or delete. assume something when wrong with the update or delete.

View File

@@ -2591,7 +2591,7 @@ public:
bool online= false; bool online= false;
/** /**
When ha_commit_inplace_alter_table() is called the the engine can When ha_commit_inplace_alter_table() is called the engine can
set this to a function to be called after the ddl log set this to a function to be called after the ddl log
is committed. is committed.
*/ */
@@ -4049,7 +4049,7 @@ public:
virtual int extra_opt(enum ha_extra_function operation, ulong arg) virtual int extra_opt(enum ha_extra_function operation, ulong arg)
{ return extra(operation); } { return extra(operation); }
/* /*
Table version id for the the table. This should change for each Table version id for the table. This should change for each
sucessfull ALTER TABLE. sucessfull ALTER TABLE.
This is used by the handlerton->check_version() to ask the engine This is used by the handlerton->check_version() to ask the engine
if the table definition has been updated. if the table definition has been updated.

View File

@@ -881,7 +881,7 @@ bool Item_field::find_item_in_field_list_processor(void *arg)
Mark field in read_map Mark field in read_map
NOTES NOTES
This is used by filesort to register used fields in a a temporary This is used by filesort to register used fields in a temporary
column read set or to register used fields in a view or check constraint column read set or to register used fields in a view or check constraint
*/ */
@@ -1385,7 +1385,7 @@ Item *Item_num::safe_charset_converter(THD *thd, CHARSET_INFO *tocs)
Create character set converter for constant items Create character set converter for constant items
using Item_null, Item_string or Item_static_string_func. using Item_null, Item_string or Item_static_string_func.
@param tocs Character set to to convert the string to. @param tocs Character set to convert the string to.
@param lossless Whether data loss is acceptable. @param lossless Whether data loss is acceptable.
@param func_name Function name, or NULL. @param func_name Function name, or NULL.
@@ -2684,7 +2684,7 @@ bool Type_std_attributes::agg_item_set_converter(const DTCollation &coll,
/* /*
For better error reporting: save the first and the second argument. For better error reporting: save the first and the second argument.
We need this only if the the number of args is 3 or 2: We need this only if the number of args is 3 or 2:
- for a longer argument list, "Illegal mix of collations" - for a longer argument list, "Illegal mix of collations"
doesn't display each argument's characteristics. doesn't display each argument's characteristics.
- if nargs is 1, then this error cannot happen. - if nargs is 1, then this error cannot happen.
@@ -4032,7 +4032,7 @@ void Item_string::print(String *str, enum_query_type query_type)
- utf8mb3 does not work well with non-BMP characters (e.g. emoji). - utf8mb3 does not work well with non-BMP characters (e.g. emoji).
- Simply changing utf8mb3 to utf8mb4 will not fully help: - Simply changing utf8mb3 to utf8mb4 will not fully help:
some character sets have unassigned characters, some character sets have unassigned characters,
they get lost during during cs->utf8mb4->cs round trip. they get lost during cs->utf8mb4->cs round trip.
*/ */
str_value.print_with_conversion(str, str->charset()); str_value.print_with_conversion(str, str->charset());
} }

View File

@@ -1470,7 +1470,7 @@ bool Item_in_optimizer::fix_fields(THD *thd, Item **ref)
- subqueries that were originally EXISTS subqueries (and were coinverted by - subqueries that were originally EXISTS subqueries (and were coinverted by
the EXISTS->IN rewrite) the EXISTS->IN rewrite)
When Item_in_optimizer is not not working as a pass-through, it When Item_in_optimizer is not working as a pass-through, it
- caches its "left argument", args[0]. - caches its "left argument", args[0].
- makes adjustments to subquery item's return value for proper NULL - makes adjustments to subquery item's return value for proper NULL
value handling value handling
@@ -1507,7 +1507,7 @@ bool Item_in_optimizer::walk(Item_processor processor,
@details @details
The function checks whether an expression cache is needed for this item The function checks whether an expression cache is needed for this item
and if if so wraps the item into an item of the class and if so wraps the item into an item of the class
Item_cache_wrapper with an appropriate expression cache set up there. Item_cache_wrapper with an appropriate expression cache set up there.
@note @note
@@ -1758,7 +1758,7 @@ bool Item_in_optimizer::is_null()
@detail @detail
Recursively transform the left and the right operand of this Item. The Recursively transform the left and the right operand of this Item. The
Right operand is an Item_in_subselect or its subclass. To avoid the Right operand is an Item_in_subselect or its subclass. To avoid the
creation of new Items, we use the fact the the left operand of the creation of new Items, we use the fact the left operand of the
Item_in_subselect is the same as the one of 'this', so instead of Item_in_subselect is the same as the one of 'this', so instead of
transforming its operand, we just assign the left operand of the transforming its operand, we just assign the left operand of the
Item_in_subselect to be equal to the left operand of 'this'. Item_in_subselect to be equal to the left operand of 'this'.
@@ -6956,7 +6956,7 @@ void Item_equal::add_const(THD *thd, Item *c)
- Also, Field_str::test_if_equality_guarantees_uniqueness() guarantees - Also, Field_str::test_if_equality_guarantees_uniqueness() guarantees
that the comparison collation of all equalities handled by Item_equal that the comparison collation of all equalities handled by Item_equal
match the the collation of the field. match the collation of the field.
Therefore, at Item_equal::add_const() time all constants constXXX Therefore, at Item_equal::add_const() time all constants constXXX
should be directly comparable to each other without an additional should be directly comparable to each other without an additional

View File

@@ -3371,7 +3371,7 @@ class Item_equal: public Item_bool_func
List<Item> equal_items; List<Item> equal_items;
/* /*
TRUE <-> one of the items is a const item. TRUE <-> one of the items is a const item.
Such item is always first in in the equal_items list Such item is always first in the equal_items list
*/ */
bool with_const; bool with_const;
/* /*

View File

@@ -4892,7 +4892,7 @@ Item_func_set_user_var::fix_length_and_dec(THD *thd)
Mark field in read_map Mark field in read_map
NOTES NOTES
This is used by filesort to register used fields in a a temporary This is used by filesort to register used fields in a temporary
column read set or to register used fields in a view column read set or to register used fields in a view
*/ */

View File

@@ -289,7 +289,7 @@ String *Item_func_sha2::val_str_ascii(String *str)
/* Convert the large number to a string-hex representation. */ /* Convert the large number to a string-hex representation. */
array_to_hex((char *) str->ptr(), digest_buf, (uint)digest_length); array_to_hex((char *) str->ptr(), digest_buf, (uint)digest_length);
/* We poked raw bytes in. We must inform the the String of its length. */ /* We poked raw bytes in. We must inform the String of its length. */
str->length((uint) digest_length*2); /* Each byte as two nybbles */ str->length((uint) digest_length*2); /* Each byte as two nybbles */
null_value= FALSE; null_value= FALSE;

View File

@@ -1372,7 +1372,7 @@ bool subselect_single_select_engine::always_returns_one_row() const
@details @details
The function checks whether an expression cache is needed for this item The function checks whether an expression cache is needed for this item
and if if so wraps the item into an item of the class and if so wraps the item into an item of the class
Item_cache_wrapper with an appropriate expression cache set up there. Item_cache_wrapper with an appropriate expression cache set up there.
@note @note
@@ -1757,7 +1757,7 @@ bool Item_in_subselect::fix_length_and_dec()
@details @details
The function checks whether an expression cache is needed for this item The function checks whether an expression cache is needed for this item
and if if so wraps the item into an item of the class and if so wraps the item into an item of the class
Item_cache_wrapper with an appropriate expression cache set up there. Item_cache_wrapper with an appropriate expression cache set up there.
@note @note

View File

@@ -136,7 +136,7 @@ bool Item_sum::init_sum_func_check(THD *thd)
If the context conditions are not met the method reports an error. If the context conditions are not met the method reports an error.
If the set function is aggregated in some outer subquery the method If the set function is aggregated in some outer subquery the method
adds it to the chain of items for such set functions that is attached adds it to the chain of items for such set functions that is attached
to the the st_select_lex structure for this subquery. to the st_select_lex structure for this subquery.
A number of designated members of the object are used to check the A number of designated members of the object are used to check the
conditions. They are specified in the comment before the Item_sum conditions. They are specified in the comment before the Item_sum
@@ -4388,7 +4388,7 @@ bool Item_func_group_concat::setup(THD *thd)
/* /*
Convert bit fields to bigint's in the temporary table. Convert bit fields to bigint's in the temporary table.
Needed as we cannot compare two table records containing BIT fields Needed as we cannot compare two table records containing BIT fields
stored in the the tree used for distinct/order by. stored in the tree used for distinct/order by.
Moreover we don't even save in the tree record null bits Moreover we don't even save in the tree record null bits
where BIT fields store parts of their data. where BIT fields store parts of their data.
*/ */

View File

@@ -139,7 +139,7 @@ class Window_spec;
The general rule to detect whether a set function is legal in a query with The general rule to detect whether a set function is legal in a query with
nested subqueries is much more complicated. nested subqueries is much more complicated.
Consider the the following query: Consider the following query:
SELECT t1.a FROM t1 GROUP BY t1.a SELECT t1.a FROM t1 GROUP BY t1.a
HAVING t1.a > ALL (SELECT t2.c FROM t2 WHERE SUM(t1.b) < t2.c). HAVING t1.a > ALL (SELECT t2.c FROM t2 WHERE SUM(t1.b) < t2.c).
The set function SUM(b) is used here in the WHERE clause of the subquery. The set function SUM(b) is used here in the WHERE clause of the subquery.

View File

@@ -1050,7 +1050,7 @@ uint week_mode(uint mode)
If set Monday is first day of week If set Monday is first day of week
WEEK_YEAR (1) If not set Week is in range 0-53 WEEK_YEAR (1) If not set Week is in range 0-53
Week 0 is returned for the the last week of the previous year (for Week 0 is returned for the last week of the previous year (for
a date at start of january) In this case one can get 53 for the a date at start of january) In this case one can get 53 for the
first week of next year. This flag ensures that the week is first week of next year. This flag ensures that the week is
relevant for the given year. Note that this flag is only relevant for the given year. Note that this flag is only

View File

@@ -3305,7 +3305,7 @@ void MYSQL_QUERY_LOG::reopen_file()
DESCRIPTION DESCRIPTION
Log given command to to normal (not rotable) log file Log given command to normal (not rotable) log file
RETURN RETURN
FASE - OK FASE - OK
@@ -7479,7 +7479,7 @@ void MYSQL_BIN_LOG::checkpoint_and_purge(ulong binlog_id)
/** /**
Searches for the first (oldest) binlog file name in in the binlog index. Searches for the first (oldest) binlog file name in the binlog index.
@param[in,out] buf_arg pointer to a buffer to hold found @param[in,out] buf_arg pointer to a buffer to hold found
the first binary log file name the first binary log file name

View File

@@ -1224,8 +1224,8 @@ inline bool normalize_binlog_name(char *to, const char *from, bool is_relay_log)
/* opt_name is not null and not empty and from is a relative path */ /* opt_name is not null and not empty and from is a relative path */
if (opt_name && opt_name[0] && from && !test_if_hard_path(from)) if (opt_name && opt_name[0] && from && !test_if_hard_path(from))
{ {
// take the path from opt_name // take the path from "opt_name"
// take the filename from from // take the filename from "from"
char log_dirpart[FN_REFLEN], log_dirname[FN_REFLEN]; char log_dirpart[FN_REFLEN], log_dirname[FN_REFLEN];
size_t log_dirpart_len, log_dirname_len; size_t log_dirpart_len, log_dirname_len;
dirname_part(log_dirpart, opt_name, &log_dirpart_len); dirname_part(log_dirpart, opt_name, &log_dirpart_len);

View File

@@ -1004,7 +1004,7 @@ bool MDL_context::fix_pins()
@param mdl_namespace Id of namespace of object to be locked @param mdl_namespace Id of namespace of object to be locked
@param db Name of database to which the object belongs @param db Name of database to which the object belongs
@param name Name of of the object @param name Name of the object
@param mdl_type The MDL lock type for the request. @param mdl_type The MDL lock type for the request.
*/ */

View File

@@ -172,7 +172,7 @@ enum enum_mdl_type {
cases when we only need to access metadata and not data, e.g. when cases when we only need to access metadata and not data, e.g. when
filling an INFORMATION_SCHEMA table. filling an INFORMATION_SCHEMA table.
Since SH lock is compatible with SNRW lock, the connection that Since SH lock is compatible with SNRW lock, the connection that
holds SH lock lock should not try to acquire any kind of table-level holds the SH lock should not try to acquire any kind of table-level
or row-level lock, as this can lead to a deadlock. Moreover, after or row-level lock, as this can lead to a deadlock. Moreover, after
acquiring SH lock, the connection should not wait for any other acquiring SH lock, the connection should not wait for any other
resource, as it might cause starvation for X locks and a potential resource, as it might cause starvation for X locks and a potential
@@ -414,8 +414,8 @@ public:
@param mdl_namespace Id of namespace of object to be locked @param mdl_namespace Id of namespace of object to be locked
@param db Name of database to which the object belongs @param db Name of database to which the object belongs
@param name Name of of the object @param name Name of the object
@param key Where to store the the MDL key. @param key Where to store the MDL key.
*/ */
void mdl_key_init(enum_mdl_namespace mdl_namespace_arg, void mdl_key_init(enum_mdl_namespace mdl_namespace_arg,
const char *db, const char *name_arg) const char *db, const char *name_arg)

View File

@@ -93,7 +93,7 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
*/ */
ulonglong single_point_ranges= 0; ulonglong single_point_ranges= 0;
/* /*
The counter of of single point ranges that we succeded to assign The counter of single point ranges that we succeded to assign
to some blocks to some blocks
*/ */
ulonglong assigned_single_point_ranges= 0; ulonglong assigned_single_point_ranges= 0;

View File

@@ -7751,7 +7751,6 @@ static void print_help()
sys_var_add_options(&all_options, sys_var::PARSE_EARLY); sys_var_add_options(&all_options, sys_var::PARSE_EARLY);
add_plugin_options(&all_options, &mem_root); add_plugin_options(&all_options, &mem_root);
sort_dynamic(&all_options, (qsort_cmp) option_cmp); sort_dynamic(&all_options, (qsort_cmp) option_cmp);
sort_dynamic(&all_options, (qsort_cmp) option_cmp);
add_terminator(&all_options); add_terminator(&all_options);
my_print_help((my_option*) all_options.buffer); my_print_help((my_option*) all_options.buffer);

View File

@@ -5852,7 +5852,7 @@ ha_rows records_in_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
SYNOPSIS SYNOPSIS
prepare_search_best_index_intersect() prepare_search_best_index_intersect()
param common info about index ranges param common info about index ranges
tree tree of ranges for indexes than can be intersected tree tree of ranges for indexes that can be intersected
common OUT info needed for search to be filled by the function common OUT info needed for search to be filled by the function
init OUT info for an initial pseudo step of the intersection plans init OUT info for an initial pseudo step of the intersection plans
cutoff_cost cut off cost of the interesting index intersection cutoff_cost cut off cost of the interesting index intersection
@@ -6505,7 +6505,7 @@ void find_index_intersect_best_extension(PARTIAL_INDEX_INTERSECT_INFO *curr)
SYNOPSIS SYNOPSIS
get_best_index_intersect() get_best_index_intersect()
param common info about index ranges param common info about index ranges
tree tree of ranges for indexes than can be intersected tree tree of ranges for indexes that can be intersected
read_time cut off value for the evaluated plans read_time cut off value for the evaluated plans
DESCRIPTION DESCRIPTION
@@ -8332,7 +8332,7 @@ SEL_TREE *Item_func_in::get_func_row_mm_tree(RANGE_OPT_PARAM *param,
res_tree= 0; res_tree= 0;
break; break;
} }
/* Join the disjunct the the OR tree that is being constructed */ /* Join the disjunct the OR tree that is being constructed */
res_tree= !res_tree ? and_tree : tree_or(param, res_tree, and_tree); res_tree= !res_tree ? and_tree : tree_or(param, res_tree, and_tree);
} }
if (omitted_tuples == argument_count() - 1) if (omitted_tuples == argument_count() - 1)
@@ -9486,7 +9486,7 @@ int and_range_trees(RANGE_OPT_PARAM *param, SEL_TREE *tree1, SEL_TREE *tree2,
tree2 represents the formula RT2 AND MT2 tree2 represents the formula RT2 AND MT2
where RT2 = R2_1 AND ... AND R2_k2, MT2=M2_1 AND ... AND M2_l2. where RT2 = R2_1 AND ... AND R2_k2, MT2=M2_1 AND ... AND M2_l2.
The result tree will represent the formula of the the following structure: The result tree will represent the formula of the following structure:
RT AND RT1MT2 AND RT2MT1, such that RT AND RT1MT2 AND RT2MT1, such that
rt is a tree obtained by range intersection of trees tree1 and tree2, rt is a tree obtained by range intersection of trees tree1 and tree2,
RT1MT2 = RT1M2_1 AND ... AND RT1M2_l2, RT1MT2 = RT1M2_1 AND ... AND RT1M2_l2,
@@ -9574,7 +9574,7 @@ SEL_TREE *tree_and(RANGE_OPT_PARAM *param, SEL_TREE *tree1, SEL_TREE *tree2)
For each imerge in 'tree' that contains only one disjunct tree, i.e. For each imerge in 'tree' that contains only one disjunct tree, i.e.
for any imerge of the form m=rt, the function performs and operation for any imerge of the form m=rt, the function performs and operation
the range part of tree, replaces rt the with the result of anding and the range part of tree, replaces rt the with the result of anding and
removes imerge m from the the merge part of 'tree'. removes imerge m from the merge part of 'tree'.
RETURN VALUE RETURN VALUE
none none
@@ -10387,7 +10387,7 @@ SEL_ARG *key_and_with_limit(RANGE_OPT_PARAM *param, uint keyno,
( 1 < kp1 <= 2 AND ( kp2 = 2 OR kp2 = 3 ) ) OR kp1 = 3 ( 1 < kp1 <= 2 AND ( kp2 = 2 OR kp2 = 3 ) ) OR kp1 = 3
Is a a valid SER_ARG expression for a key of at least 2 keyparts. Is a valid SER_ARG expression for a key of at least 2 keyparts.
For simplicity, we will assume that expr2 is a single range predicate, For simplicity, we will assume that expr2 is a single range predicate,
i.e. on the form ( a < x < b AND ... ). It is easy to generalize to a i.e. on the form ( a < x < b AND ... ). It is easy to generalize to a
@@ -11820,7 +11820,7 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
{ {
/* /*
For any index the total number of records within all ranges For any index the total number of records within all ranges
cannot be be bigger than the number of records in the table. cannot be bigger than the number of records in the table.
This check is needed as sometimes that table statistics or range This check is needed as sometimes that table statistics or range
estimates may be slightly out of sync. estimates may be slightly out of sync.
*/ */
@@ -15480,7 +15480,7 @@ bool QUICK_GROUP_MIN_MAX_SELECT::add_range(SEL_ARG *sel_range)
NOTES NOTES
quick_prefix_select is made over the conditions on the whole key. quick_prefix_select is made over the conditions on the whole key.
It defines a number of ranges of length x. It defines a number of ranges of length x.
However when jumping through the prefixes we use only the the first However when jumping through the prefixes we use only the first
few most significant keyparts in the range key. However if there few most significant keyparts in the range key. However if there
are more keyparts to follow the ones we are using we must make the are more keyparts to follow the ones we are using we must make the
condition on the key inclusive (because x < "ab" means condition on the key inclusive (because x < "ab" means

View File

@@ -43,7 +43,7 @@
The execution of the transformed query (Q1R) follows these steps: The execution of the transformed query (Q1R) follows these steps:
1. For each row of t1 where t1.b < const a temporary table 1. For each row of t1 where t1.b < const a temporary table
containing all rows of of t2 with t2.a = t1.a is created containing all rows of t2 with t2.a = t1.a is created
2. If there are any rows in the temporary table aggregation 2. If there are any rows in the temporary table aggregation
is performed for them is performed for them
3. The result of the aggregation is joined with t1. 3. The result of the aggregation is joined with t1.
@@ -155,7 +155,7 @@
subsets the operation can applied to each subset independently. In this case subsets the operation can be applied to each subset independently. In this case
all rows are first partitioned into the groups each of which contains all the all rows are first partitioned into the groups each of which contains all the
rows from the partitions belonging the same subset and then each group rows from the partitions belonging the same subset and then each group
is subpartitioned into groups in the the post join operation. is subpartitioned into groups in the post join operation.
The set of all rows belonging to the union of several partitions is called The set of all rows belonging to the union of several partitions is called
here superpartition. If a grouping operation is defined by the list here superpartition. If a grouping operation is defined by the list
@@ -1237,7 +1237,7 @@ bool JOIN::inject_best_splitting_cond(table_map excluded_tables)
Test if equality is injected for split optimization Test if equality is injected for split optimization
@param @param
eq_item equality to to test eq_item equality to test
@retval @retval
true eq_item is equality injected for split optimization true eq_item is equality injected for split optimization

View File

@@ -3657,7 +3657,7 @@ void JOIN::dbug_verify_sj_inner_tables(uint prefix_size) const
#endif #endif
/* /*
Remove the last join tab from from join->cur_sj_inner_tables bitmap Remove the last join tab from join->cur_sj_inner_tables bitmap
@note @note
remaining_tables contains @tab. remaining_tables contains @tab.
@@ -4901,7 +4901,7 @@ int SJ_TMP_TABLE::sj_weedout_check_row(THD *thd)
ptr= tmp_table->record[0] + 1; ptr= tmp_table->record[0] + 1;
/* Put the the rowids tuple into table->record[0]: */ /* Put the rowids tuple into table->record[0]: */
// 1. Store the length // 1. Store the length
if (((Field_varstring*)(tmp_table->field[0]))->length_bytes == 1) if (((Field_varstring*)(tmp_table->field[0]))->length_bytes == 1)

View File

@@ -239,7 +239,7 @@ public:
/* /*
Now find out how many different keys we will get (for now we Now find out how many different keys we will get (for now we
ignore the fact that we have "keypart_i=const" restriction for ignore the fact that we have "keypart_i=const" restriction for
some key components, that may make us think think that loose some key components, that may make us think that loose
scan will produce more distinct records than it actually will) scan will produce more distinct records than it actually will)
*/ */
ulong rpc; ulong rpc;

View File

@@ -1013,7 +1013,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref,
@param[in] ref Reference to the key value and info @param[in] ref Reference to the key value and info
@param[in] field Field used the MIN/MAX expression @param[in] field Field used the MIN/MAX expression
@param[in] cond WHERE condition @param[in] cond WHERE condition
@param[in] range_fl Says whether there is a condition to to be checked @param[in] range_fl Says whether there is a condition to be checked
@param[in] prefix_len Length of the constant part of the key @param[in] prefix_len Length of the constant part of the key
@retval @retval

View File

@@ -944,7 +944,7 @@ bool check_func_dependency(JOIN *join,
dac.usable_tables= dep_tables; dac.usable_tables= dep_tables;
/* /*
Analyze the the ON expression and create Dep_module_expr objects and Analyze the ON expression and create Dep_module_expr objects and
Dep_value_field objects for the used fields. Dep_value_field objects for the used fields.
*/ */
uint and_level=0; uint and_level=0;
@@ -1411,7 +1411,7 @@ void build_eq_mods_for_cond(THD *thd, Dep_analysis_context *ctx,
= AND_ij (fdep_A_[i] OR fdep_B_[j]) = AND_ij (fdep_A_[i] OR fdep_B_[j])
Then we walk over the obtained "fdep_A_[i] OR fdep_B_[j]" pairs, and Then we walk over the obtained "fdep_A_[i] OR fdep_B_[j]" pairs, and
- Discard those that that have left and right part referring to different - Discard those that have left and right part referring to different
columns. We can't infer anything useful from "col1=expr1 OR col2=expr2". columns. We can't infer anything useful from "col1=expr1 OR col2=expr2".
- When left and right parts refer to the same column, we check if they are - When left and right parts refer to the same column, we check if they are
essentially the same. essentially the same.

View File

@@ -452,7 +452,7 @@ void Range_rowid_filter_cost_info::trace_info(THD *thd)
@details @details
The function looks through the array of cost info for range filters The function looks through the array of cost info for range filters
and chooses the element for the range filter that promise the greatest and chooses the element for the range filter that promise the greatest
gain with the the ref or range access of the table by access_key_no. gain with the ref or range access of the table by access_key_no.
As the array is sorted by cross_x in ascending order the function stops As the array is sorted by cross_x in ascending order the function stops
the look through as soon as it reaches the first element with the look through as soon as it reaches the first element with
cross_x_adj > records because the range filter for this element and the cross_x_adj > records because the range filter for this element and the

View File

@@ -62,7 +62,7 @@
--------------------------- ---------------------------
If the search structure to test whether an element is in F can be fully If the search structure to test whether an element is in F can be fully
placed in RAM then this test is expected to be be much cheaper than a random placed in RAM then this test is expected to be much cheaper than a random
access of a record from Ti. We'll consider two search structures for access of a record from Ti. We'll consider two search structures for
pk-filters: ordered array and bloom filter. Ordered array is easy to pk-filters: ordered array and bloom filter. Ordered array is easy to
implement, but it's space consuming. If a filter contains primary keys implement, but it's space consuming. If a filter contains primary keys

View File

@@ -482,7 +482,7 @@ Rpl_filter::add_table_rule(HASH* h, const char* table_spec)
{ {
const char* dot = strchr(table_spec, '.'); const char* dot = strchr(table_spec, '.');
if (!dot) return 1; if (!dot) return 1;
// len is always > 0 because we know the there exists a '.' // len is always > 0 because we know there exists a '.'
uint len = (uint)strlen(table_spec); uint len = (uint)strlen(table_spec);
TABLE_RULE_ENT* e = (TABLE_RULE_ENT*)my_malloc(key_memory_TABLE_RULE_ENT, TABLE_RULE_ENT* e = (TABLE_RULE_ENT*)my_malloc(key_memory_TABLE_RULE_ENT,
sizeof(TABLE_RULE_ENT) + len, sizeof(TABLE_RULE_ENT) + len,

View File

@@ -162,7 +162,7 @@ pack_row(TABLE *table, MY_BITMAP const* cols,
@note The relay log information can be NULL, which means that no @note The relay log information can be NULL, which means that no
checking or comparison with the source table is done, simply checking or comparison with the source table is done, simply
because it is not used. This feature is used by MySQL Backup to because it is not used. This feature is used by MySQL Backup to
unpack a row from from the backup image, but can be used for other unpack a row from the backup image, but can be used for other
purposes as well. purposes as well.
@param rli Relay log info, which can be NULL @param rli Relay log info, which can be NULL

View File

@@ -260,7 +260,7 @@ public:
bool sql_thread_caught_up; bool sql_thread_caught_up;
/** /**
Simple setter for @ref worker_threads_caught_up; Simple setter for @ref worker_threads_caught_up;
sets it `false` to to indicate new user events in queue sets it `false` to indicate new user events in queue
@pre @ref data_lock held to prevent race with is_threads_caught_up() @pre @ref data_lock held to prevent race with is_threads_caught_up()
*/ */
inline void unset_worker_threads_caught_up() inline void unset_worker_threads_caught_up()

View File

@@ -351,7 +351,7 @@ ER_CANT_GET_WD
serbian "Ne mogu da dobijem trenutni direktorijum (errno: %M)" serbian "Ne mogu da dobijem trenutni direktorijum (errno: %M)"
slo "Nemôžem zistiť pracovný adresár (chybový kód: %M)" slo "Nemôžem zistiť pracovný adresár (chybový kód: %M)"
spa "No puedo obtener directorio de trabajo (Error: %M)" spa "No puedo obtener directorio de trabajo (Error: %M)"
swe "Kan inte inte läsa aktivt bibliotek. (Felkod: %M)" swe "Kan inte läsa aktivt bibliotek. (Felkod: %M)"
ukr "Не можу визначити робочу теку (помилка: %M)" ukr "Не можу визначити робочу теку (помилка: %M)"
ER_CANT_LOCK ER_CANT_LOCK
chi "无法锁定文件(错误号码:%M" chi "无法锁定文件(错误号码:%M"
@@ -2292,7 +2292,7 @@ ER_WRONG_SUB_KEY
ita "Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave" ita "Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave"
jpn "キーのプレフィックスが不正です。キーが文字列ではないか、プレフィックス長がキーよりも長いか、ストレージエンジンが一意索引のプレフィックス指定をサポートしていません。" jpn "キーのプレフィックスが不正です。キーが文字列ではないか、プレフィックス長がキーよりも長いか、ストレージエンジンが一意索引のプレフィックス指定をサポートしていません。"
kor "부정확한 서버 파트 키. 사용된 키 파트가 스트링이 아니거나 키 파트의 길이가 너무 깁니다." kor "부정확한 서버 파트 키. 사용된 키 파트가 스트링이 아니거나 키 파트의 길이가 너무 깁니다."
nla "Foutief sub-gedeelte van de zoeksleutel. De gebruikte zoeksleutel is geen onderdeel van een string of of de gebruikte lengte is langer dan de zoeksleutel" nla "Foutief sub-gedeelte van de zoeksleutel. De gebruikte zoeksleutel is geen onderdeel van een string of de gebruikte lengte is langer dan de zoeksleutel"
nor "Feil delnøkkel. Den brukte delnøkkelen er ikke en streng eller den oppgitte lengde er lengre enn nøkkel lengden" nor "Feil delnøkkel. Den brukte delnøkkelen er ikke en streng eller den oppgitte lengde er lengre enn nøkkel lengden"
norwegian-ny "Feil delnykkel. Den brukte delnykkelen er ikkje ein streng eller den oppgitte lengda er lengre enn nykkellengden" norwegian-ny "Feil delnykkel. Den brukte delnykkelen er ikkje ein streng eller den oppgitte lengda er lengre enn nykkellengden"
pol "Błędna podczę?ć klucza. Użyta czę?ć klucza nie jest łańcuchem lub użyta długo?ć jest większa niż czę?ć klucza" pol "Błędna podczę?ć klucza. Użyta czę?ć klucza nie jest łańcuchem lub użyta długo?ć jest większa niż czę?ć klucza"
@@ -4548,7 +4548,7 @@ ER_FT_MATCHING_KEY_NOT_FOUND
eng "Can't find FULLTEXT index matching the column list" eng "Can't find FULLTEXT index matching the column list"
est "Ei suutnud leida FULLTEXT indeksit, mis kattuks kasutatud tulpadega" est "Ei suutnud leida FULLTEXT indeksit, mis kattuks kasutatud tulpadega"
fre "Impossible de trouver un index FULLTEXT correspondant à cette liste de colonnes" fre "Impossible de trouver un index FULLTEXT correspondant à cette liste de colonnes"
ger "Kann keinen FULLTEXT-Index finden, der der Feldliste entspricht" ger "Kann keinen FULLTEXT-Index finden, der der Feldliste entspricht"
geo "სვეტების სიის შესაბამისი FULLTEXT ინდექსი ვერ ვიპოვე" geo "სვეტების სიის შესაბამისი FULLTEXT ინდექსი ვერ ვიპოვე"
ita "Impossibile trovare un indice FULLTEXT che corrisponda all'elenco delle colonne" ita "Impossibile trovare un indice FULLTEXT che corrisponda all'elenco delle colonne"
jpn "列リストに対応する全文索引(FULLTEXT)が見つかりません。" jpn "列リストに対応する全文索引(FULLTEXT)が見つかりません。"

View File

@@ -999,7 +999,7 @@ int terminate_slave_threads(Master_info* mi,int thread_mask,bool skip_lock)
This function is called after requesting the thread to terminate This function is called after requesting the thread to terminate
(by setting @c abort_slave member of @c Relay_log_info or @c (by setting @c abort_slave member of @c Relay_log_info or @c
Master_info structure to 1). Termination of the thread is Master_info structure to 1). Termination of the thread is
controlled with the the predicate <code>*slave_running</code>. controlled with the predicate <code>*slave_running</code>.
Function will acquire @c term_lock before waiting on the condition Function will acquire @c term_lock before waiting on the condition
unless @c skip_lock is true in which case the mutex should be owned unless @c skip_lock is true in which case the mutex should be owned

View File

@@ -339,7 +339,7 @@ public:
/// ///
/// sp_pcontext objects are organized in a tree according to the following /// sp_pcontext objects are organized in a tree according to the following
/// rules: /// rules:
/// - one sp_pcontext object corresponds for for each BEGIN..END block; /// - one sp_pcontext object corresponds for each BEGIN..END block;
/// - one sp_pcontext object corresponds for each exception handler; /// - one sp_pcontext object corresponds for each exception handler;
/// - one additional sp_pcontext object is created to contain /// - one additional sp_pcontext object is created to contain
/// Stored Program parameters. /// Stored Program parameters.

View File

@@ -77,7 +77,7 @@
There are two complications caused by multiple wild_many characters. There are two complications caused by multiple wild_many characters.
For, say, two wild_many characters, either can accept any number of utf8 For, say, two wild_many characters, either can accept any number of utf8
characters, as long the the total amount of them is less then or equal to L. characters, as long as the total amount of them is less than or equal to L.
Same logic applies to any number of non-consequent wild_many characters Same logic applies to any number of non-consequent wild_many characters
(consequent wild_many characters count as one). This gives the number of (consequent wild_many characters count as one). This gives the number of
matching strings of matching strings of

View File

@@ -57,7 +57,7 @@ public:
- We don't put this value as a static const inside the class, - We don't put this value as a static const inside the class,
because "gdb" would display it every time when we do "print" because "gdb" would display it every time when we do "print"
for a time_round_mode_t value. for a time_round_mode_t value.
- We can't put into into a function returning this value, because - We can't put into a function returning this value, because
it's not allowed to use functions in static_assert. it's not allowed to use functions in static_assert.
*/ */
enum known_values_t enum known_values_t

View File

@@ -175,7 +175,7 @@ public:
private: private:
/* /*
Intersect with a bitmap represented as as longlong. Intersect with a bitmap represented as longlong.
In addition, pad the rest of the bitmap with 0 or 1 bits In addition, pad the rest of the bitmap with 0 or 1 bits
depending on pad_with_ones parameter. depending on pad_with_ones parameter.
*/ */

View File

@@ -2080,7 +2080,7 @@ void THD::disconnect()
#ifdef SIGNAL_WITH_VIO_CLOSE #ifdef SIGNAL_WITH_VIO_CLOSE
/* /*
Since a active vio might might have not been set yet, in Since an active vio might have not been set yet, in
any case save a reference to avoid closing a inexistent any case save a reference to avoid closing a inexistent
one or closing the vio twice if there is a active one. one or closing the vio twice if there is a active one.
*/ */
@@ -8083,7 +8083,7 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query_arg,
The filter is in decide_logging_format() to mark queries to not be stored The filter is in decide_logging_format() to mark queries to not be stored
in the binary log, for example by a shared distributed engine like S3. in the binary log, for example by a shared distributed engine like S3.
This function resets the filter to ensure the the query is logged if This function resets the filter to ensure the query is logged if
the binlog is active. the binlog is active.
Note that 'direct' is set to false, which means that the query will Note that 'direct' is set to false, which means that the query will

View File

@@ -6690,7 +6690,7 @@ public:
("c"),("b"),("c"),("c"),("a"),("b"),("g") ("c"),("b"),("c"),("c"),("a"),("b"),("g")
("c"),("a"),("b"),("d"),("b"),("e") ("c"),("a"),("b"),("d"),("b"),("e")
- Let's demonstrate how the the set operation INTERSECT ALL is proceesed - Let's demonstrate how the set operation INTERSECT ALL is processed
for the query for the query
SELECT f FROM t1 INTERSECT ALL SELECT f FROM t2 SELECT f FROM t1 INTERSECT ALL SELECT f FROM t2
@@ -6738,7 +6738,7 @@ public:
|0 |1 |c | |0 |1 |c |
|0 |1 |c | |0 |1 |c |
- Let's demonstrate how the the set operation EXCEPT ALL is proceesed - Let's demonstrate how the set operation EXCEPT ALL is processed
for the query for the query
SELECT f FROM t1 EXCEPT ALL SELECT f FROM t3 SELECT f FROM t1 EXCEPT ALL SELECT f FROM t3
@@ -7341,7 +7341,7 @@ class multi_update :public select_result_interceptor
{ {
TABLE_LIST *all_tables; /* query/update command tables */ TABLE_LIST *all_tables; /* query/update command tables */
List<TABLE_LIST> *leaves; /* list of leaves of join table tree */ List<TABLE_LIST> *leaves; /* list of leaves of join table tree */
List<TABLE_LIST> updated_leaves; /* list of of updated leaves */ List<TABLE_LIST> updated_leaves; /* list of updated leaves */
TABLE_LIST *update_tables; TABLE_LIST *update_tables;
TABLE **tmp_tables, *main_table, *table_to_update; TABLE **tmp_tables, *main_table, *table_to_update;
TMP_TABLE_PARAM *tmp_table_param; TMP_TABLE_PARAM *tmp_table_param;

View File

@@ -359,7 +359,7 @@ struct st_unit_ctxt_elem
@details @details
For each table reference ref(T) from the FROM list of every select sl For each table reference ref(T) from the FROM list of every select sl
immediately contained in the specification query of this element this immediately contained in the specification query of this element this
method searches for the definition of T in the the with clause which method searches for the definition of T in the with clause which
this element belongs to. If such definition is found then the dependency this element belongs to. If such definition is found then the dependency
on it is set in sl->with_dep and in this->base_dep_map. on it is set in sl->with_dep and in this->base_dep_map.
*/ */
@@ -624,7 +624,7 @@ TABLE_LIST *With_element::find_first_sq_rec_ref_in_select(st_select_lex *sel)
@param dep_map IN/OUT The bit where to mark the found dependencies @param dep_map IN/OUT The bit where to mark the found dependencies
@details @details
This method searches in the unit 'unit' for the the references in FROM This method searches in the unit 'unit' for the references in FROM
lists of all selects contained in this unit and in the with clause lists of all selects contained in this unit and in the with clause
attached to this unit that refer to definitions of tables from the attached to this unit that refer to definitions of tables from the
same with clause as this element. same with clause as this element.
@@ -668,7 +668,7 @@ void With_element::check_dependencies_in_unit(st_select_lex_unit *unit,
@param dep_map IN/OUT The bit where to mark the found dependencies @param dep_map IN/OUT The bit where to mark the found dependencies
@details @details
This method searches in the with_clause for the the references in FROM This method searches in the with_clause for the references in FROM
lists of all selects contained in the specifications of the with elements lists of all selects contained in the specifications of the with elements
from this with_clause that refer to definitions of tables from the from this with_clause that refer to definitions of tables from the
same with clause as this element. same with clause as this element.

View File

@@ -521,7 +521,7 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
not in safe mode (not using option --safe-mode) not in safe mode (not using option --safe-mode)
- There is no limit clause - There is no limit clause
- The condition is constant - The condition is constant
- If there is a condition, then it it produces a non-zero value - If there is a condition, then it produces a non-zero value
- If the current command is DELETE FROM with no where clause, then: - If the current command is DELETE FROM with no where clause, then:
- We should not be binlogging this statement in row-based, and - We should not be binlogging this statement in row-based, and
- there should be no delete triggers associated with the table. - there should be no delete triggers associated with the table.

View File

@@ -88,7 +88,7 @@ static uint field_enumerator(uchar *arg)
@details @details
The function creates a temporary table for the expression cache, defines The function creates a temporary table for the expression cache, defines
the search index and initializes auxiliary search structures used to check the search index and initializes auxiliary search structures used to check
whether a given set of of values of the expression parameters is in some whether a given set of values of the expression parameters is in some
cache entry. cache entry.
*/ */

View File

@@ -424,7 +424,7 @@ void JOIN_CACHE::create_flag_fields()
the buffer. Such placement helps to optimize construction of access keys. the buffer. Such placement helps to optimize construction of access keys.
For each field that is used to build access keys to the joined table but For each field that is used to build access keys to the joined table but
is stored in some other join cache buffer the function saves a pointer is stored in some other join cache buffer the function saves a pointer
to the the field descriptor. The array of such pointers are placed in the to the field descriptor. The array of such pointers are placed in the
the join cache structure just before the array of pointers to the the join cache structure just before the array of pointers to the
blob fields blob_ptr. blob fields blob_ptr.
Any field stored in a join cache buffer that is used to construct keys Any field stored in a join cache buffer that is used to construct keys
@@ -444,7 +444,7 @@ void JOIN_CACHE::create_flag_fields()
through. For each of this pointers we find out in what previous key cache through. For each of this pointers we find out in what previous key cache
the referenced field is stored. The value of 'referenced_field_no' the referenced field is stored. The value of 'referenced_field_no'
provides us with the index into the array of offsets for referenced provides us with the index into the array of offsets for referenced
fields stored in the join cache. The offset read by the the index allows fields stored in the join cache. The offset read by the index allows
us to read the field without reading all other fields of the record us to read the field without reading all other fields of the record
stored the join cache buffer. This optimizes the construction of keys stored the join cache buffer. This optimizes the construction of keys
to access 'join_tab' when some key arguments are stored in the previous to access 'join_tab' when some key arguments are stored in the previous
@@ -522,7 +522,7 @@ void JOIN_CACHE::create_key_arg_fields()
} }
} }
} }
/* After this 'blob_ptr' shall not be be changed */ /* After this 'blob_ptr' shall not be changed */
blob_ptr= copy_ptr; blob_ptr= copy_ptr;
/* Now create local fields that are used to build ref for this key access */ /* Now create local fields that are used to build ref for this key access */
@@ -557,7 +557,7 @@ void JOIN_CACHE::create_key_arg_fields()
have to be added is determined as the difference between all read fields have to be added is determined as the difference between all read fields
and and those for which the descriptors have been already created. and those for which the descriptors have been already created.
The latter are supposed to be marked in the bitmap tab->table->tmp_set. The latter are supposed to be marked in the bitmap tab->table->tmp_set.
The function increases the value of 'length' to the the total length of The function increases the value of 'length' to the total length of
the added fields. the added fields.
NOTES NOTES
@@ -1373,7 +1373,7 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
curr_rec_pos= cp; curr_rec_pos= cp;
/* If the there is a match flag set its value to 0 */ /* If there is a match flag set its value to 0 */
copy= field_descr; copy= field_descr;
if (with_match_flag) if (with_match_flag)
*copy[0].str= 0; *copy[0].str= 0;
@@ -1542,7 +1542,7 @@ uint JOIN_CACHE::write_record_data(uchar * link, bool *is_full)
to point to the very beginning of the join buffer. If the buffer is to point to the very beginning of the join buffer. If the buffer is
reset for writing additionally: reset for writing additionally:
- the counter of the records in the buffer is set to 0, - the counter of the records in the buffer is set to 0,
- the the value of 'last_rec_pos' gets pointing at the position just - the value of 'last_rec_pos' gets pointing at the position just
before the buffer, before the buffer,
- 'end_pos' is set to point to the beginning of the join buffer, - 'end_pos' is set to point to the beginning of the join buffer,
- the size of the auxiliary buffer is reset to 0, - the size of the auxiliary buffer is reset to 0,
@@ -1608,7 +1608,7 @@ bool JOIN_CACHE::put_record()
This default implementation of the virtual function get_record This default implementation of the virtual function get_record
reads fields of the next record from the join buffer of this cache. reads fields of the next record from the join buffer of this cache.
The function also reads all other fields associated with this record The function also reads all other fields associated with this record
from the the join buffers of the previous caches. The fields are read from the join buffers of the previous caches. The fields are read
into the corresponding record buffers. into the corresponding record buffers.
It is supposed that 'pos' points to the position in the buffer It is supposed that 'pos' points to the position in the buffer
right after the previous record when the function is called. right after the previous record when the function is called.
@@ -1656,7 +1656,7 @@ bool JOIN_CACHE::get_record()
This default implementation of the virtual function get_record_pos This default implementation of the virtual function get_record_pos
reads the fields of the record positioned at 'rec_ptr' from the join buffer. reads the fields of the record positioned at 'rec_ptr' from the join buffer.
The function also reads all other fields associated with this record The function also reads all other fields associated with this record
from the the join buffers of the previous caches. The fields are read from the join buffers of the previous caches. The fields are read
into the corresponding record buffers. into the corresponding record buffers.
RETURN VALUE RETURN VALUE
@@ -2200,7 +2200,7 @@ enum_nested_loop_state JOIN_CACHE::join_records(bool skip_last)
Prepare for generation of null complementing extensions. Prepare for generation of null complementing extensions.
For all inner tables of the outer join operation for which For all inner tables of the outer join operation for which
regular matches have been just found the field 'first_unmatched' regular matches have been just found the field 'first_unmatched'
is set to point the the first inner table. After all null is set to point the first inner table. After all null
complement rows are generated for this outer join this field complement rows are generated for this outer join this field
is set back to NULL. is set back to NULL.
*/ */
@@ -2637,7 +2637,7 @@ inline bool JOIN_CACHE::check_match(uchar *rec_ptr)
table records. table records.
If the 'join_tab' is the last inner table of the embedding outer If the 'join_tab' is the last inner table of the embedding outer
join and the null complemented record satisfies the outer join join and the null complemented record satisfies the outer join
condition then the the corresponding match flag is turned on condition then the corresponding match flag is turned on
unless it has been set earlier. This setting may trigger unless it has been set earlier. This setting may trigger
re-evaluation of pushdown conditions for the record. re-evaluation of pushdown conditions for the record.
@@ -2700,7 +2700,7 @@ finish:
DESCRIPTION DESCRIPTION
This function puts info about the type of the used join buffer (flat or This function puts info about the type of the used join buffer (flat or
incremental) and on the type of the the employed join algorithm (BNL, incremental) and on the type of the employed join algorithm (BNL,
BNLH, BKA or BKAH) to the data structure BNLH, BKA or BKAH) to the data structure
RETURN VALUE RETURN VALUE
@@ -2912,7 +2912,7 @@ int JOIN_CACHE_HASHED::init_hash_table()
/* /*
TODO: Make a better estimate for this upper bound of TODO: Make a better estimate for this upper bound of
the number of records in in the join buffer. the number of records in the join buffer.
*/ */
size_t max_n= buff_size / (pack_length-length+ size_t max_n= buff_size / (pack_length-length+
key_entry_length+size_of_key_ofs); key_entry_length+size_of_key_ofs);
@@ -3221,7 +3221,7 @@ bool JOIN_CACHE_HASHED::skip_if_not_needed_match()
key_len key value length key_len key value length
key_ref_ptr OUT position of the reference to the next key from key_ref_ptr OUT position of the reference to the next key from
the hash element for the found key , or the hash element for the found key , or
a position where the reference to the the hash a position where the reference to the hash
element for the key is to be added in the element for the key is to be added in the
case when the key has not been found case when the key has not been found
@@ -3454,7 +3454,7 @@ bool JOIN_CACHE_HASHED::check_all_match_flags_for_key(uchar *key_chain_ptr)
RETURN VALUE RETURN VALUE
length of the key value - if the starting value of 'cur_key_entry' refers length of the key value - if the starting value of 'cur_key_entry' refers
to the position after that referred by the the value of 'last_key_entry', to the position after that referred by the value of 'last_key_entry',
0 - otherwise. 0 - otherwise.
*/ */
@@ -3672,7 +3672,7 @@ bool JOIN_CACHE_BNL::prepare_look_for_matches(bool skip_last)
RETURN VALUE RETURN VALUE
pointer to the position right after the prefix of the current record pointer to the position right after the prefix of the current record
in the join buffer if the there is another record to iterate over, in the join buffer if there is another record to iterate over,
0 - otherwise. 0 - otherwise.
*/ */
@@ -3823,7 +3823,7 @@ uchar *JOIN_CACHE_BNLH::get_matching_chain_by_join_key()
record from the join buffer is ignored. record from the join buffer is ignored.
The function builds the hashed key from the join fields of join_tab The function builds the hashed key from the join fields of join_tab
and uses this key to look in the hash table of the join cache for and uses this key to look in the hash table of the join cache for
the chain of matching records in in the join buffer. If it finds the chain of matching records in the join buffer. If it finds
such a chain it sets the member last_rec_ref_ptr to point to the such a chain it sets the member last_rec_ref_ptr to point to the
last link of the chain while setting the member next_rec_ref_po 0. last link of the chain while setting the member next_rec_ref_po 0.
@@ -3862,7 +3862,7 @@ bool JOIN_CACHE_BNLH::prepare_look_for_matches(bool skip_last)
RETURN VALUE RETURN VALUE
pointer to the beginning of the record fields in the join buffer pointer to the beginning of the record fields in the join buffer
if the there is another record to iterate over, 0 - otherwise. if there is another record to iterate over, 0 - otherwise.
*/ */
uchar *JOIN_CACHE_BNLH::get_next_candidate_for_match() uchar *JOIN_CACHE_BNLH::get_next_candidate_for_match()
@@ -4066,7 +4066,7 @@ int JOIN_TAB_SCAN_MRR::next()
join_tab->tracker->r_rows++; join_tab->tracker->r_rows++;
join_tab->tracker->r_rows_after_where++; join_tab->tracker->r_rows_after_where++;
/* /*
If a record in in an incremental cache contains no fields then the If a record in an incremental cache contains no fields then the
association for the last record in cache will be equal to cache->end_pos association for the last record in cache will be equal to cache->end_pos
*/ */
/* /*
@@ -4284,7 +4284,7 @@ DESCRIPTION
RETURN VALUE RETURN VALUE
pointer to the start of the record fields in the join buffer pointer to the start of the record fields in the join buffer
if the there is another record to iterate over, 0 - otherwise. if there is another record to iterate over, 0 - otherwise.
*/ */
uchar *JOIN_CACHE_BKA::get_next_candidate_for_match() uchar *JOIN_CACHE_BKA::get_next_candidate_for_match()

View File

@@ -1239,7 +1239,7 @@ class JOIN_TAB_SCAN_MRR: public JOIN_TAB_SCAN
/* Number of ranges to be processed by the MRR interface */ /* Number of ranges to be processed by the MRR interface */
uint ranges; uint ranges;
/* Flag to to be passed to the MRR interface */ /* Flag to be passed to the MRR interface */
uint mrr_mode; uint mrr_mode;
/* MRR buffer associated with this join cache */ /* MRR buffer associated with this join cache */
@@ -1274,7 +1274,7 @@ class JOIN_CACHE_BKA :public JOIN_CACHE
{ {
private: private:
/* Flag to to be passed to the companion JOIN_TAB_SCAN_MRR object */ /* Flag to be passed to the companion JOIN_TAB_SCAN_MRR object */
uint mrr_mode; uint mrr_mode;
/* /*
@@ -1370,7 +1370,7 @@ class JOIN_CACHE_BKAH :public JOIN_CACHE_BNLH
{ {
private: private:
/* Flag to to be passed to the companion JOIN_TAB_SCAN_MRR object */ /* Flag to be passed to the companion JOIN_TAB_SCAN_MRR object */
uint mrr_mode; uint mrr_mode;
/* /*

View File

@@ -12265,7 +12265,7 @@ bool SELECT_LEX_UNIT::explainable() const
@param thd the current thread handle @param thd the current thread handle
@param db_name name of db of the table to look for @param db_name name of db of the table to look for
@param db_name name of db of the table to look for @param table_name name of table
@return first found table, NULL or ERROR_TABLE @return first found table, NULL or ERROR_TABLE
*/ */

View File

@@ -3342,7 +3342,7 @@ public:
at parse time to set local name resolution contexts for various parts at parse time to set local name resolution contexts for various parts
of a query. For example, in a JOIN ... ON (some_condition) clause the of a query. For example, in a JOIN ... ON (some_condition) clause the
Items in 'some_condition' must be resolved only against the operands Items in 'some_condition' must be resolved only against the operands
of the the join, and not against the whole clause. Similarly, Items in of the join, and not against the whole clause. Similarly, Items in
subqueries should be resolved against the subqueries (and outer queries). subqueries should be resolved against the subqueries (and outer queries).
The stack is used in the following way: when the parser detects that The stack is used in the following way: when the parser detects that
all Items in some clause need a local context, it creates a new context all Items in some clause need a local context, it creates a new context

View File

@@ -156,9 +156,8 @@ Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs)
@param name String searched for @param name String searched for
@param list_names A list of names searched in @param list_names A list of names searched in
@return True if if the name is in the list. @retval true String found
@retval true String found @retval false String not found
@retval false String not found
*/ */
static bool is_name_in_list(const char *name, List<const char> list_names) static bool is_name_in_list(const char *name, List<const char> list_names)
@@ -2136,7 +2135,7 @@ static int add_keyword_string(String *str, const char *keyword,
/** /**
@brief Truncate the partition file name from a path it it exists. @brief Truncate the partition file name from a path if it exists.
@note A partition file name will contain one or more '#' characters. @note A partition file name will contain one or more '#' characters.
One of the occurrences of '#' will be either "#P#" or "#p#" depending One of the occurrences of '#' will be either "#P#" or "#p#" depending
@@ -3415,7 +3414,7 @@ uint32 get_list_array_idx_for_endpoint(partition_info *part_info,
'2000-00-00' can be compared to '2000-01-01' but TO_DAYS('2000-00-00') '2000-00-00' can be compared to '2000-01-01' but TO_DAYS('2000-00-00')
returns NULL which cannot be compared used <, >, <=, >= etc. returns NULL which cannot be compared used <, >, <=, >= etc.
Otherwise, just return the the first index (lowest value). Otherwise, just return the first index (lowest value).
*/ */
enum_monotonicity_info monotonic; enum_monotonicity_info monotonic;
monotonic= part_info->part_expr->get_monotonicity_info(); monotonic= part_info->part_expr->get_monotonicity_info();

View File

@@ -64,7 +64,7 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent,
/* /*
Avoid problems with a rename on a table that we have locked or Avoid problems with a rename on a table that we have locked or
if the user is trying to to do this in a transcation context if the user is trying to do this in a transaction context
*/ */
if (thd->locked_tables_mode || thd->in_active_multi_stmt_transaction()) if (thd->locked_tables_mode || thd->in_active_multi_stmt_transaction())

View File

@@ -6669,7 +6669,7 @@ add_key_field(JOIN *join,
@note @note
If field items f1 and f2 belong to the same multiple equality and If field items f1 and f2 belong to the same multiple equality and
a key is added for f1, the the same key is added for f2. a key is added for f1, the same key is added for f2.
@returns @returns
*key_fields is incremented if we stored a key in the array *key_fields is incremented if we stored a key in the array
@@ -8399,7 +8399,7 @@ best_access_path(JOIN *join,
} while (keyuse->table == table && keyuse->key == key); } while (keyuse->table == table && keyuse->key == key);
/* /*
Assume that that each key matches a proportional part of table. Assume that each key matches a proportional part of table.
*/ */
if (!found_part && !ft_key && !loose_scan_opt.have_a_case()) if (!found_part && !ft_key && !loose_scan_opt.have_a_case())
continue; // Nothing usable found continue; // Nothing usable found
@@ -8782,7 +8782,7 @@ best_access_path(JOIN *join,
{ {
double rows= record_count * records; double rows= record_count * records;
/* /*
If we use filter F with selectivity s the the cost of fetching data If we use filter F with selectivity s the cost of fetching data
by key using this filter will be by key using this filter will be
cost_of_fetching_1_row * rows * s + cost_of_fetching_1_row * rows * s +
cost_of_fetching_1_key_tuple * rows * (1 - s) + cost_of_fetching_1_key_tuple * rows * (1 - s) +
@@ -9032,7 +9032,7 @@ best_access_path(JOIN *join,
(1) The found 'ref' access produces more records than a table scan (1) The found 'ref' access produces more records than a table scan
(or index scan, or quick select), or 'ref' is more expensive than (or index scan, or quick select), or 'ref' is more expensive than
any of them. any of them.
(2) This doesn't hold: the best way to perform table scan is to to perform (2) This doesn't hold: the best way to perform table scan is to perform
'range' access using index IDX, and the best way to perform 'ref' 'range' access using index IDX, and the best way to perform 'ref'
access is to use the same index IDX, with the same or more key parts. access is to use the same index IDX, with the same or more key parts.
(note: it is not clear how this rule is/should be extended to (note: it is not clear how this rule is/should be extended to
@@ -10151,7 +10151,7 @@ void JOIN::get_partial_cost_and_fanout(int end_tab_idx,
- it operates on a JOIN that haven't yet finished its optimization phase (in - it operates on a JOIN that haven't yet finished its optimization phase (in
particular, fix_semijoin_strategies_for_picked_join_order() and particular, fix_semijoin_strategies_for_picked_join_order() and
get_best_combination() haven't been called) get_best_combination() haven't been called)
- it assumes the the join prefix doesn't have any semi-join plans - it assumes the join prefix doesn't have any semi-join plans
These assumptions are met by the caller of the function. These assumptions are met by the caller of the function.
*/ */
@@ -10390,7 +10390,7 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
as a starting point. This value includes selectivity of equality (*). We as a starting point. This value includes selectivity of equality (*). We
should somehow discount it. should somehow discount it.
Looking at calculate_cond_selectivity_for_table(), one can see that that Looking at calculate_cond_selectivity_for_table(), one can see that
the value is not necessarily a direct multiplicand in the value is not necessarily a direct multiplicand in
table->cond_selectivity table->cond_selectivity
@@ -11682,7 +11682,7 @@ int JOIN_TAB::make_scan_filter()
@details @details
This function finds out whether the ref items that have been chosen This function finds out whether the ref items that have been chosen
by the planner to access this table can be used for hash join algorithms. by the planner to access this table can be used for hash join algorithms.
The answer depends on a certain property of the the fields of the The answer depends on a certain property of the fields of the
joined tables on which the hash join key is built. joined tables on which the hash join key is built.
@note @note
@@ -11934,7 +11934,7 @@ JOIN_TAB *first_explain_order_tab(JOIN* join)
JOIN_TAB* tab; JOIN_TAB* tab;
tab= join->join_tab; tab= join->join_tab;
if (!tab) if (!tab)
return NULL; /* Can happen when when the tables were optimized away */ return NULL; /* Can happen when the tables were optimized away */
return (tab->bush_children) ? tab->bush_children->start : tab; return (tab->bush_children) ? tab->bush_children->start : tab;
} }
@@ -12894,7 +12894,7 @@ inline void add_cond_and_fix(THD *thd, Item **e1, Item *e2)
Implementation overview Implementation overview
1. update_ref_and_keys() accumulates info about null-rejecting 1. update_ref_and_keys() accumulates info about null-rejecting
predicates in in KEY_FIELD::null_rejecting predicates in KEY_FIELD::null_rejecting
1.1 add_key_part saves these to KEYUSE. 1.1 add_key_part saves these to KEYUSE.
2. create_ref_for_key copies them to TABLE_REF. 2. create_ref_for_key copies them to TABLE_REF.
3. add_not_null_conds adds "x IS NOT NULL" to join_tab->select_cond of 3. add_not_null_conds adds "x IS NOT NULL" to join_tab->select_cond of
@@ -14504,7 +14504,7 @@ end_sj_materialize(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
has been chosen. If the function decides that a join buffer can be employed has been chosen. If the function decides that a join buffer can be employed
then it selects the most appropriate join cache object that contains this then it selects the most appropriate join cache object that contains this
join buffer. join buffer.
The result of the check and the type of the the join buffer to be used The result of the check and the type of the join buffer to be used
depend on: depend on:
- the access method to access rows of the joined table - the access method to access rows of the joined table
- whether the join table is an inner table of an outer join or semi-join - whether the join table is an inner table of an outer join or semi-join
@@ -14965,7 +14965,7 @@ restart:
to re-check the same single-table condition for each joined record. to re-check the same single-table condition for each joined record.
This method removes from JOIN_TAB::select_cond and JOIN_TAB::select::cond This method removes from JOIN_TAB::select_cond and JOIN_TAB::select::cond
all top-level conjuncts that also appear in in JOIN_TAB::cache_select::cond. all top-level conjuncts that also appear in JOIN_TAB::cache_select::cond.
*/ */
void JOIN_TAB::remove_redundant_bnl_scan_conds() void JOIN_TAB::remove_redundant_bnl_scan_conds()
@@ -22745,7 +22745,7 @@ sub_select_postjoin_aggr(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
NOTES NOTES
The function implements the algorithmic schema for both Blocked Nested The function implements the algorithmic schema for both Blocked Nested
Loop Join and Batched Key Access Join. The difference can be seen only at Loop Join and Batched Key Access Join. The difference can be seen only at
the level of of the implementation of the put_record and join_records the level of the implementation of the put_record and join_records
virtual methods for the cache object associated with the join_tab. virtual methods for the cache object associated with the join_tab.
The put_record method accumulates records in the cache, while the The put_record method accumulates records in the cache, while the
join_records method builds all matching join records and send them into join_records method builds all matching join records and send them into
@@ -22913,7 +22913,7 @@ sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool end_of_records)
the predicate (t2.b=5 OR t2.b IS NULL) can not be checked until the predicate (t2.b=5 OR t2.b IS NULL) can not be checked until
t4.a=t2.a becomes true. t4.a=t2.a becomes true.
In order not to re-evaluate the predicates that were already evaluated In order not to re-evaluate the predicates that were already evaluated
as attached pushed down predicates, a pointer to the the first as attached pushed down predicates, a pointer to the first
most inner unmatched table is maintained in join_tab->first_unmatched. most inner unmatched table is maintained in join_tab->first_unmatched.
Thus, when the first row from t5 with t5.a=t3.a is found Thus, when the first row from t5 with t5.a=t3.a is found
this pointer for t5 is changed from t4 to t2. this pointer for t5 is changed from t4 to t2.
@@ -23309,7 +23309,7 @@ evaluate_null_complemented_join_record(JOIN *join, JOIN_TAB *join_tab)
COND *select_cond; COND *select_cond;
for ( ; join_tab <= last_inner_tab ; join_tab++) for ( ; join_tab <= last_inner_tab ; join_tab++)
{ {
/* Change the the values of guard predicate variables. */ /* Change the values of guard predicate variables. */
join_tab->found= 1; join_tab->found= 1;
join_tab->not_null_compl= 0; join_tab->not_null_compl= 0;
/* The outer row is complemented by nulls for each inner tables */ /* The outer row is complemented by nulls for each inner tables */
@@ -28738,7 +28738,7 @@ void free_underlaid_joins(THD *thd, SELECT_LEX *select)
The function replaces occurrences of group by fields in expr The function replaces occurrences of group by fields in expr
by ref objects for these fields unless they are under aggregate by ref objects for these fields unless they are under aggregate
functions. functions.
The function also corrects value of the the maybe_null attribute The function also corrects value of the maybe_null attribute
for the items of all subexpressions containing group by fields. for the items of all subexpressions containing group by fields.
@b EXAMPLES @b EXAMPLES
@@ -29211,7 +29211,7 @@ void inline JOIN::clear_sum_funcs()
Prepare for returning 'empty row' when there is no matching row. Prepare for returning 'empty row' when there is no matching row.
- Mark all tables with mark_as_null_row() - Mark all tables with mark_as_null_row()
- Make a copy of of all simple SELECT items - Make a copy of all simple SELECT items
- Reset all sum functions to NULL or 0. - Reset all sum functions to NULL or 0.
*/ */
@@ -31668,7 +31668,7 @@ test_if_cheaper_ordering(bool in_join_optimizer,
{ {
KEY *pkinfo=tab->table->key_info+table->s->primary_key; KEY *pkinfo=tab->table->key_info+table->s->primary_key;
/* /*
If the values of of records per key for the prefixes If the values of records per key for the prefixes
of the primary key are considered unknown we assume of the primary key are considered unknown we assume
they are equal to 1. they are equal to 1.
*/ */

View File

@@ -286,7 +286,7 @@ typedef struct st_join_table {
st_join_table *first_inner; /**< first inner table for including outerjoin */ st_join_table *first_inner; /**< first inner table for including outerjoin */
bool found; /**< true after all matches or null complement */ bool found; /**< true after all matches or null complement */
bool not_null_compl;/**< true before null complement is added */ bool not_null_compl;/**< true before null complement is added */
st_join_table *last_inner; /**< last table table for embedding outer join */ st_join_table *last_inner; /**< last table for embedding outer join */
st_join_table *first_upper; /**< first inner table for embedding outer join */ st_join_table *first_upper; /**< first inner table for embedding outer join */
st_join_table *first_unmatched; /**< used for optimization purposes only */ st_join_table *first_unmatched; /**< used for optimization purposes only */
@@ -1720,7 +1720,7 @@ public:
memcpy(dest, src, src_arr.size() * src_arr.element_size()); memcpy(dest, src, src_arr.size() * src_arr.element_size());
} }
/// Overwrites 'ref_ptrs' and remembers the the source as 'current'. /// Overwrites 'ref_ptrs' and remembers the source as 'current'.
void set_items_ref_array(Ref_ptr_array src_arr) void set_items_ref_array(Ref_ptr_array src_arr)
{ {
copy_ref_ptr_array(ref_ptrs, src_arr); copy_ref_ptr_array(ref_ptrs, src_arr);

View File

@@ -80,7 +80,7 @@ protected:
It's also responsible to generate new values and updating the sequence It's also responsible to generate new values and updating the sequence
table (engine=SQL_SEQUENCE) through its specialized handler interface. table (engine=SQL_SEQUENCE) through its specialized handler interface.
If increment is 0 then the sequence will be be using If increment is 0 then the sequence will be using
auto_increment_increment and auto_increment_offset variables, just like auto_increment_increment and auto_increment_offset variables, just like
AUTO_INCREMENT is using. AUTO_INCREMENT is using.
*/ */

View File

@@ -972,7 +972,7 @@ void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to)
0, then index_read_idx is called to read the index to that record, the 0, then index_read_idx is called to read the index to that record, the
record then being ready to be updated, if found. If not found an error is record then being ready to be updated, if found. If not found an error is
set and error message printed. If the record is found, store_record is set and error message printed. If the record is found, store_record is
called, then store_server_fields stores each field from the the members of called, then store_server_fields stores each field from the members of
the updated FOREIGN_SERVER struct. the updated FOREIGN_SERVER struct.
RETURN VALUE RETURN VALUE

View File

@@ -52,7 +52,7 @@ typedef struct st_sort_addon_field
/* Sort addon packed field */ /* Sort addon packed field */
Field *field; /* Original field */ Field *field; /* Original field */
uint offset; /* Offset from the last sorted field */ uint offset; /* Offset from the last sorted field */
uint null_offset; /* Offset to to null bit from the last sorted field */ uint null_offset; /* Offset to null bit from the last sorted field */
uint length; /* Length in the sort buffer */ uint length; /* Length in the sort buffer */
uint8 null_bit; /* Null bit mask for the field */ uint8 null_bit; /* Null bit mask for the field */
} SORT_ADDON_FIELD; } SORT_ADDON_FIELD;

View File

@@ -439,7 +439,7 @@ public:
statistical data is to be read. E.g. if the index idx consists of 3 statistical data is to be read. E.g. if the index idx consists of 3
components (p1,p2,p3) the table index_stats usually will contain 3 rows for components (p1,p2,p3) the table index_stats usually will contain 3 rows for
this index: the first - for the prefix (p1), the second - for the prefix this index: the first - for the prefix (p1), the second - for the prefix
(p1,p2), and the third - for the the prefix (p1,p2,p3). After the key fields (p1,p2), and the third - for the prefix (p1,p2,p3). After the key fields
has been set a call of get_stat_value looks for a row by the set key value. has been set a call of get_stat_value looks for a row by the set key value.
If the row is found and the value of the avg_frequency column is not null If the row is found and the value of the avg_frequency column is not null
then this value is assigned to key_info->read_stat.avg_frequency[k]. then this value is assigned to key_info->read_stat.avg_frequency[k].
@@ -3308,7 +3308,7 @@ read_statistics_for_tables(THD *thd, TABLE_LIST *tables, bool force_reload)
/* /*
Do not read statistics for any query that explicity involves Do not read statistics for any query that explicity involves
statistical tables, failure to to do so we may end up statistical tables, failure to do so we may end up
in a deadlock. in a deadlock.
*/ */
if (found_stat_table || !statistics_for_tables_is_needed) if (found_stat_table || !statistics_for_tables_is_needed)
@@ -3345,7 +3345,7 @@ read_statistics_for_tables(THD *thd, TABLE_LIST *tables, bool force_reload)
/* /*
The following lock is here to ensure that if a lot of threads are The following lock is here to ensure that if a lot of threads are
accessing the table at the same time after a ANALYZE TABLE, accessing the table at the same time after a ANALYZE TABLE,
only one thread is loading the data from the the stats tables only one thread is loading the data from the stats tables
and the others threads are reusing the loaded data. and the others threads are reusing the loaded data.
*/ */
mysql_mutex_lock(&table_share->LOCK_statistics); mysql_mutex_lock(&table_share->LOCK_statistics);

View File

@@ -359,7 +359,7 @@ public:
/* /*
This is used to collect the the basic statistics from a Unique object: This is used to collect the basic statistics from a Unique object:
- count of values - count of values
- count of distinct values - count of distinct values
- count of distinct values that have occurred only once - count of distinct values that have occurred only once

View File

@@ -4478,7 +4478,7 @@ handler *mysql_create_frm_image(THD *thd, HA_CREATE_INFO *create_info,
/* /*
Unless table's storage engine supports partitioning natively Unless table's storage engine supports partitioning natively
don't allow foreign keys on partitioned tables (they won't don't allow foreign keys on partitioned tables (they won't
work work even with InnoDB beneath of partitioning engine). work even with InnoDB beneath of partitioning engine).
If storage engine handles partitioning natively (like NDB) If storage engine handles partitioning natively (like NDB)
foreign keys support is possible, so we let the engine decide. foreign keys support is possible, so we let the engine decide.
*/ */
@@ -11722,7 +11722,7 @@ do_continue:;
5) Write statement to the binary log. 5) Write statement to the binary log.
6) If we are under LOCK TABLES and do ALTER TABLE ... RENAME we 6) If we are under LOCK TABLES and do ALTER TABLE ... RENAME we
remove placeholders and release metadata locks. remove placeholders and release metadata locks.
7) If we are not not under LOCK TABLES we rely on the caller 7) If we are not under LOCK TABLES we rely on the caller
(mysql_execute_command()) to release metadata locks. (mysql_execute_command()) to release metadata locks.
*/ */

View File

@@ -160,7 +160,7 @@ int calc_weekday(long daynr,bool sunday_first_day_of_week)
If set Monday is first day of week If set Monday is first day of week
WEEK_YEAR (1) If not set Week is in range 0-53 WEEK_YEAR (1) If not set Week is in range 0-53
Week 0 is returned for the the last week of the previous year (for Week 0 is returned for the last week of the previous year (for
a date at start of january) In this case one can get 53 for the a date at start of january) In this case one can get 53 for the
first week of next year. This flag ensures that the week is first week of next year. This flag ensures that the week is
relevant for the given year. Note that this flag is only relevant for the given year. Note that this flag is only

View File

@@ -683,7 +683,7 @@ bool table_value_constr::to_be_wrapped_as_with_tail()
the select of the form the select of the form
SELECT * FROM (VALUES (v1), ... (vn)) tvc_x SELECT * FROM (VALUES (v1), ... (vn)) tvc_x
@retval pointer to the result of of the transformation if successful @retval pointer to the result of the transformation if successful
NULL - otherwise NULL - otherwise
*/ */
@@ -736,7 +736,7 @@ st_select_lex *wrap_tvc(THD *thd, st_select_lex *tvc_sl,
/* /*
Create a unit for the substituted select used for TVC and attach it Create a unit for the substituted select used for TVC and attach it
to the the wrapper select wrapper_sl as the only unit. The created to the wrapper select wrapper_sl as the only unit. The created
unit is the unit for the derived table tvc_x of the transformation. unit is the unit for the derived table tvc_x of the transformation.
*/ */
if (!(derived_unit= new (thd->mem_root) SELECT_LEX_UNIT())) if (!(derived_unit= new (thd->mem_root) SELECT_LEX_UNIT()))
@@ -803,7 +803,7 @@ err:
SELECT * FROM (VALUES (v1), ... (vn)) tvc_x SELECT * FROM (VALUES (v1), ... (vn)) tvc_x
ORDER BY ... LIMIT n [OFFSET m] ORDER BY ... LIMIT n [OFFSET m]
@retval pointer to the result of of the transformation if successful @retval pointer to the result of the transformation if successful
NULL - otherwise NULL - otherwise
*/ */

View File

@@ -4192,7 +4192,7 @@ public:
@retval @retval
NULL on error NULL on error
@retval @retval
non-NULL a pointer to a a valid string on success non-NULL a pointer to a valid string on success
*/ */
virtual String *print_item_value(THD *thd, Item *item, String *str) const= 0; virtual String *print_item_value(THD *thd, Item *item, String *str) const= 0;

View File

@@ -4204,7 +4204,7 @@ static Sys_var_on_access_global<Sys_var_enum,
Sys_thread_pool_priority( Sys_thread_pool_priority(
"thread_pool_priority", "thread_pool_priority",
"Threadpool priority. High priority connections usually start executing earlier than low priority." "Threadpool priority. High priority connections usually start executing earlier than low priority."
"If priority set to 'auto', the the actual priority(low or high) is determined based on whether or not connection is inside transaction.", "If priority set to 'auto', the actual priority(low or high) is determined based on whether or not connection is inside transaction.",
SESSION_VAR(threadpool_priority), CMD_LINE(REQUIRED_ARG), SESSION_VAR(threadpool_priority), CMD_LINE(REQUIRED_ARG),
threadpool_priority_names, DEFAULT(TP_PRIORITY_AUTO)); threadpool_priority_names, DEFAULT(TP_PRIORITY_AUTO));
@@ -4806,7 +4806,7 @@ static Sys_var_bit Sys_foreign_key_checks(
" (including ON UPDATE and ON DELETE behavior) InnoDB tables are checked," " (including ON UPDATE and ON DELETE behavior) InnoDB tables are checked,"
" while if set to 0, they are not checked. 0 is not recommended for normal " " while if set to 0, they are not checked. 0 is not recommended for normal "
"use, though it can be useful in situations where you know the data is " "use, though it can be useful in situations where you know the data is "
"consistent, but want to reload data in a different order from that that " "consistent, but want to reload data in a different order from that "
"specified by parent/child relationships. Setting this variable to 1 does " "specified by parent/child relationships. Setting this variable to 1 does "
"not retrospectively check for inconsistencies introduced while set to 0.", "not retrospectively check for inconsistencies introduced while set to 0.",
SESSION_VAR(option_bits), NO_CMD_LINE, SESSION_VAR(option_bits), NO_CMD_LINE,

View File

@@ -3914,7 +3914,7 @@ bool Virtual_column_info::check_access(THD *thd)
table 'table' and parses it, building an item object for it. The table 'table' and parses it, building an item object for it. The
pointer to this item is placed into in a Virtual_column_info object pointer to this item is placed into in a Virtual_column_info object
that is created. After this the function performs that is created. After this the function performs
semantic analysis of the item by calling the the function semantic analysis of the item by calling the function
fix_and_check_vcol_expr(). Since the defining expression is part of the table fix_and_check_vcol_expr(). Since the defining expression is part of the table
definition the item for it is created in table->memroot within the definition the item for it is created in table->memroot within the
special arena TABLE::expr_arena or in the thd memroot for INSERT DELAYED special arena TABLE::expr_arena or in the thd memroot for INSERT DELAYED

View File

@@ -2729,7 +2729,7 @@ struct TABLE_LIST
{ {
/* Normal open. */ /* Normal open. */
OPEN_NORMAL= 0, OPEN_NORMAL= 0,
/* Associate a table share only if the the table exists. */ /* Associate a table share only if the table exists. */
OPEN_IF_EXISTS, OPEN_IF_EXISTS,
/* Don't associate a table share. */ /* Don't associate a table share. */
OPEN_STUB OPEN_STUB

View File

@@ -652,7 +652,7 @@ bool THD::drop_temporary_table(TABLE *table, bool *is_trans, bool delete_table)
{ {
if (tab != table && tab->query_id != 0) if (tab != table && tab->query_id != 0)
{ {
/* Found a table instance in use. This table cannot be be dropped. */ /* Found a table instance in use. This table cannot be dropped. */
my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias.c_ptr()); my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias.c_ptr());
result= true; result= true;
goto end; goto end;

View File

@@ -27,7 +27,7 @@
Considerations on Windows : since Windows locks the AIO buffers in physical memory, Considerations on Windows : since Windows locks the AIO buffers in physical memory,
it is important that these buffers are compactly allocated. it is important that these buffers are compactly allocated.
We try to to prevent any kinds of memory fragmentation We try to prevent any kinds of memory fragmentation
A relatively small region (at most 1MB) is allocated, for equally sized smallish(256 bytes) A relatively small region (at most 1MB) is allocated, for equally sized smallish(256 bytes)
This allow buffers. The region is pagesize-aligned (via VirtualAlloc allocation) This allow buffers. The region is pagesize-aligned (via VirtualAlloc allocation)

View File

@@ -1052,7 +1052,7 @@ public:
This method uses system function (localtime_r()) for conversion This method uses system function (localtime_r()) for conversion
local time in system time zone in MYSQL_TIME structure to its my_time_t local time in system time zone in MYSQL_TIME structure to its my_time_t
representation. Unlike the same function for Time_zone_db class representation. Unlike the same function for Time_zone_db class
it it won't handle unnormalized input properly. Still it will it won't handle unnormalized input properly. Still it will
return lowest possible my_time_t in case of ambiguity or if we return lowest possible my_time_t in case of ambiguity or if we
provide time corresponding to the time-gap. provide time corresponding to the time-gap.

View File

@@ -559,7 +559,7 @@ Recover (repair)/ options (When using '--recover' or '--safe-recover'):\n\
file when it's full).\n\ file when it's full).\n\
--create-missing-keys\n\ --create-missing-keys\n\
Create missing keys. This assumes that the data\n\ Create missing keys. This assumes that the data\n\
file is correct and that the the number of rows stored\n\ file is correct and that the number of rows stored\n\
in the index file is correct. Enables --quick.\n\ in the index file is correct. Enables --quick.\n\
-e, --extend-check Try to recover every possible row from the data file\n\ -e, --extend-check Try to recover every possible row from the data file\n\
Normally this will also find a lot of garbage rows;\n\ Normally this will also find a lot of garbage rows;\n\

View File

@@ -93,7 +93,7 @@ int aria_get_capabilities(File kfile, ARIA_TABLE_CAPABILITIES *cap)
cap->bitmap_pages_covered= aligned_bit_blocks * 16 + 1; cap->bitmap_pages_covered= aligned_bit_blocks * 16 + 1;
} }
/* Do a check that that we got things right */ /* Do a check that we got things right */
if (share.state.header.data_file_type != BLOCK_RECORD && if (share.state.header.data_file_type != BLOCK_RECORD &&
cap->online_backup_safe) cap->online_backup_safe)
error= HA_ERR_NOT_A_TABLE; error= HA_ERR_NOT_A_TABLE;

View File

@@ -982,7 +982,7 @@ static int collect_tables(LEX_STRING *str, LSN checkpoint_start_log_horizon)
Tables in a normal state have their two file descriptors open. Tables in a normal state have their two file descriptors open.
In some rare cases like REPAIR, some descriptor may be closed or even In some rare cases like REPAIR, some descriptor may be closed or even
-1. If that happened, the _ma_state_info_write() may fail. This is -1. If that happened, the _ma_state_info_write() may fail. This is
prevented by enclosing all all places which close/change kfile.file with prevented by enclosing all places which close/change kfile.file with
intern_lock. intern_lock.
*/ */
kfile= share->kfile; kfile= share->kfile;

View File

@@ -1480,7 +1480,7 @@ LSN translog_get_file_max_lsn_stored(uint32 file)
if (file >= limit) if (file >= limit)
{ {
DBUG_PRINT("info", ("The file in in progress")); DBUG_PRINT("info", ("The file in progress"));
DBUG_RETURN(LSN_IMPOSSIBLE); DBUG_RETURN(LSN_IMPOSSIBLE);
} }
@@ -4522,7 +4522,7 @@ static my_bool translog_write_parts_on_page(TRANSLOG_ADDRESS *horizon,
if (!cursor->chaser) if (!cursor->chaser)
cursor->buffer->size+= length; cursor->buffer->size+= length;
/* /*
We do not not updating parts->total_record_length here because it is We do not updating parts->total_record_length here because it is
need only before writing record to have total length need only before writing record to have total length
*/ */
DBUG_PRINT("info", ("Write parts buffer #%u: %p " DBUG_PRINT("info", ("Write parts buffer #%u: %p "

View File

@@ -2175,7 +2175,7 @@ int maria_indexes_are_disabled(MARIA_HA *info)
/* /*
No keys or all are enabled. keys is the number of keys. Left shifted No keys or all are enabled. keys is the number of keys. Left shifted
gives us only one bit set. When decreased by one, gives us all all bits gives us only one bit set. When decreased by one, gives us all bits
up to this one set and it gets unset. up to this one set and it gets unset.
*/ */
if (!share->base.keys || if (!share->base.keys ||

View File

@@ -996,7 +996,7 @@ static int flush_all_key_blocks(PAGECACHE *pagecache)
The function first compares the memory size parameter The function first compares the memory size parameter
with the key cache value. with the key cache value.
If they differ the function free the the memory allocated for the If they differ the function free the memory allocated for the
old key cache blocks by calling the end_pagecache function and old key cache blocks by calling the end_pagecache function and
then rebuilds the key cache with new blocks by calling then rebuilds the key cache with new blocks by calling
init_key_cache. init_key_cache.

View File

@@ -685,7 +685,7 @@ prototype_redo_exec_hook(INCOMPLETE_LOG)
{ {
MARIA_HA *info; MARIA_HA *info;
/* We try to get table first, so that we get the table in in the trace log */ /* We try to get table first, so that we get the table in the trace log */
info= get_MARIA_HA_from_REDO_record(rec); info= get_MARIA_HA_from_REDO_record(rec);
if (skip_DDLs) if (skip_DDLs)
@@ -1175,7 +1175,7 @@ prototype_redo_exec_hook(REDO_REPAIR_TABLE)
my_bool quick_repair; my_bool quick_repair;
DBUG_ENTER("exec_REDO_LOGREC_REDO_REPAIR_TABLE"); DBUG_ENTER("exec_REDO_LOGREC_REDO_REPAIR_TABLE");
/* We try to get table first, so that we get the table in in the trace log */ /* We try to get table first, so that we get the table in the trace log */
info= get_MARIA_HA_from_REDO_record(rec); info= get_MARIA_HA_from_REDO_record(rec);
if (!info) if (!info)

View File

@@ -276,7 +276,7 @@ ret_error:
If keys are packed, then smaller or identical key is stored in buff If keys are packed, then smaller or identical key is stored in buff
@return @return
@retval <0, 0 , >0 depending on if if found is smaller, equal or bigger than @retval <0, 0 , >0 depending on if found is smaller, equal or bigger than
'key' 'key'
@retval ret_pos Points to where the identical or bigger key starts @retval ret_pos Points to where the identical or bigger key starts
@retval last_key Set to 1 if key is the last key in the page. @retval last_key Set to 1 if key is the last key in the page.

View File

@@ -854,7 +854,7 @@ int _ma_insert(register MARIA_HA *info, MARIA_KEY *key,
page_store_size(share, anc_page); page_store_size(share, anc_page);
/* /*
Check if the new key fits totally into the the page Check if the new key fits totally into the page
(anc_buff is big enough to contain a full page + one key) (anc_buff is big enough to contain a full page + one key)
*/ */
if (a_length <= share->max_index_block_size) if (a_length <= share->max_index_block_size)

View File

@@ -18,7 +18,7 @@
/* some definitions for full-text indices */ /* some definitions for full-text indices */
/* ftdefs.h is is always included first when used, so we have to include my_global.h here */ /* ftdefs.h is always included first when used, so we have to include my_global.h here */
#include <my_global.h> #include <my_global.h>
#include "fulltext.h" #include "fulltext.h"
#include <m_ctype.h> #include <m_ctype.h>

View File

@@ -19,7 +19,7 @@
#pragma interface /* gcc class implementation */ #pragma interface /* gcc class implementation */
#endif #endif
/* class for the the myisam handler */ /* class for the MyISAM handler */
#include <myisam.h> #include <myisam.h>
#include <ft_global.h> #include <ft_global.h>

View File

@@ -1397,7 +1397,7 @@ int mi_indexes_are_disabled(MI_INFO *info)
/* /*
No keys or all are enabled. keys is the number of keys. Left shifted No keys or all are enabled. keys is the number of keys. Left shifted
gives us only one bit set. When decreased by one, gives us all all bits gives us only one bit set. When decreased by one, gives us all bits
up to this one set and it gets unset. up to this one set and it gets unset.
*/ */
if (!share->base.keys || if (!share->base.keys ||

View File

@@ -172,7 +172,7 @@ static struct my_option my_long_options[] =
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
{"create-missing-keys", OPT_CREATE_MISSING_KEYS, {"create-missing-keys", OPT_CREATE_MISSING_KEYS,
"Create missing keys. This assumes that the data file is correct and that " "Create missing keys. This assumes that the data file is correct and that "
"the the number of rows stored in the index file is correct. Enables " "the number of rows stored in the index file is correct. Enables "
"--quick", "--quick",
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
#ifndef DBUG_OFF #ifndef DBUG_OFF