From 22efc2c784e1b7199fb5804e6330168277ea7dce Mon Sep 17 00:00:00 2001 From: Vasilii Lakhin Date: Sun, 23 Mar 2025 08:08:11 +0200 Subject: [PATCH] Fix typos in C comments inside storage/ --- storage/archive/azio.c | 2 +- storage/archive/ha_archive.cc | 8 ++++---- storage/archive/ha_archive.h | 2 +- storage/connect/array.h | 2 +- storage/connect/csort.cpp | 2 +- storage/connect/filamgz.cpp | 2 +- storage/connect/filamtxt.cpp | 4 ++-- storage/connect/filter.cpp | 4 ++-- storage/connect/fmdlex.c | 2 +- storage/connect/ha_connect.cc | 2 +- storage/connect/ha_connect.h | 2 +- storage/connect/inihandl.cpp | 2 +- storage/connect/odbconn.cpp | 2 +- storage/connect/plgcnx.h | 4 ++-- storage/connect/plgdbsem.h | 2 +- storage/connect/plgdbutl.cpp | 20 +++++++++---------- storage/connect/plgxml.cpp | 2 +- storage/connect/reldef.cpp | 2 +- storage/connect/reldef.h | 4 ++-- storage/connect/tabdos.cpp | 6 +++--- storage/connect/tabfmt.cpp | 2 +- storage/connect/tabjdbc.cpp | 2 +- storage/connect/tabmul.cpp | 2 +- storage/connect/tabpivot.cpp | 2 +- storage/connect/tabrest.cpp | 2 +- storage/connect/tabvct.cpp | 2 +- storage/connect/tabwmi.cpp | 4 ++-- storage/connect/tabxml.cpp | 4 ++-- storage/connect/unzip.c | 6 +++--- storage/connect/user_connect.cc | 4 ++-- storage/connect/value.cpp | 2 +- storage/connect/xindex.h | 2 +- storage/csv/ha_tina.cc | 8 ++++---- storage/federated/ha_federated.cc | 4 ++-- storage/federated/ha_federated.h | 2 +- storage/heap/hp_create.c | 6 +++--- storage/heap/hp_hash.c | 2 +- storage/innobase/btr/btr0btr.cc | 4 ++-- storage/innobase/btr/btr0cur.cc | 2 +- storage/innobase/buf/buf0buf.cc | 4 ++-- storage/innobase/buf/buf0dblwr.cc | 2 +- storage/innobase/buf/buf0flu.cc | 4 ++-- storage/innobase/dict/dict0stats.cc | 2 +- storage/innobase/fil/fil0fil.cc | 6 +++--- storage/innobase/fil/fil0pagecompress.cc | 2 +- storage/innobase/fsp/fsp0fsp.cc | 6 +++--- storage/innobase/fsp/fsp0sysspace.cc | 4 ++-- storage/innobase/fts/fts0ast.cc | 2 +- 
storage/innobase/fts/fts0fts.cc | 6 +++--- storage/innobase/fts/fts0opt.cc | 8 ++++---- storage/innobase/fts/fts0que.cc | 18 ++++++++--------- storage/innobase/fts/fts0sql.cc | 2 +- storage/innobase/gis/gis0rtree.cc | 6 +++--- storage/innobase/handler/ha_innodb.cc | 12 +++++------ storage/innobase/handler/ha_innodb.h | 6 +++--- storage/innobase/handler/handler0alter.cc | 10 +++++----- storage/innobase/handler/i_s.cc | 4 ++-- storage/innobase/include/data0data.h | 2 +- storage/innobase/include/db0err.h | 4 ++-- storage/innobase/include/dict0boot.h | 2 +- storage/innobase/include/dict0dict.h | 2 +- storage/innobase/include/dict0mem.h | 4 ++-- storage/innobase/include/dict0types.h | 2 +- storage/innobase/include/fil0crypt.h | 4 ++-- storage/innobase/include/fil0fil.h | 2 +- storage/innobase/include/fsp0fsp.h | 2 +- storage/innobase/include/fts0fts.h | 2 +- storage/innobase/include/fts0priv.h | 2 +- storage/innobase/include/fts0types.h | 2 +- storage/innobase/include/lock0lock.h | 2 +- storage/innobase/include/lock0priv.h | 2 +- storage/innobase/include/lock0types.h | 2 +- storage/innobase/include/mtr0types.h | 2 +- storage/innobase/include/row0ftsort.h | 2 +- storage/innobase/include/row0mysql.h | 2 +- storage/innobase/include/row0quiesce.h | 2 +- storage/innobase/include/row0undo.h | 2 +- storage/innobase/include/srv0mon.h | 10 +++++----- storage/innobase/include/srv0srv.h | 4 ++-- storage/innobase/include/trx0purge.h | 2 +- storage/innobase/include/trx0sys.h | 2 +- storage/innobase/include/ut0lst.h | 2 +- storage/innobase/include/ut0pool.h | 2 +- storage/innobase/lock/lock0lock.cc | 14 ++++++------- storage/innobase/lock/lock0prdt.cc | 2 +- storage/innobase/log/log0recv.cc | 4 ++-- storage/innobase/log/log0sync.cc | 8 ++++---- storage/innobase/os/os0file.cc | 2 +- storage/innobase/page/page0page.cc | 2 +- storage/innobase/pars/pars0opt.cc | 2 +- storage/innobase/read/read0read.cc | 2 +- storage/innobase/row/row0ftsort.cc | 6 +++--- storage/innobase/row/row0import.cc 
| 12 +++++------ storage/innobase/row/row0ins.cc | 2 +- storage/innobase/row/row0merge.cc | 6 +++--- storage/innobase/row/row0mysql.cc | 6 +++--- storage/innobase/row/row0purge.cc | 2 +- storage/innobase/row/row0sel.cc | 6 +++--- storage/innobase/row/row0vers.cc | 4 ++-- storage/innobase/srv/srv0mon.cc | 2 +- storage/innobase/srv/srv0srv.cc | 2 +- storage/innobase/trx/trx0roll.cc | 2 +- storage/innobase/trx/trx0undo.cc | 2 +- storage/innobase/ut/ut0rbt.cc | 4 ++-- storage/maria/aria_pack.c | 2 +- storage/maria/ha_maria.cc | 2 +- storage/maria/ha_s3.cc | 10 +++++----- storage/maria/ma_backup.c | 2 +- storage/maria/ma_bitmap.c | 10 +++++----- storage/maria/ma_blockrec.c | 6 +++--- storage/maria/ma_check.c | 6 +++--- storage/maria/ma_commit.c | 2 +- storage/maria/ma_delete.c | 2 +- storage/maria/ma_dynrec.c | 2 +- storage/maria/ma_extra.c | 2 +- storage/maria/ma_ft_stem.c | 2 +- storage/maria/ma_key_recover.c | 4 ++-- storage/maria/ma_loghandler.c | 10 +++++----- storage/maria/ma_open.c | 4 ++-- storage/maria/ma_packrec.c | 2 +- storage/maria/ma_page.c | 2 +- storage/maria/ma_pagecache.c | 16 +++++++-------- storage/maria/ma_recovery.c | 4 ++-- storage/maria/ma_rfirst.c | 2 +- storage/maria/ma_rt_split.c | 2 +- storage/maria/ma_search.c | 2 +- storage/maria/ma_sort.c | 2 +- storage/maria/ma_sp_key.c | 2 +- storage/maria/ma_state.c | 2 +- storage/maria/ma_update.c | 2 +- storage/maria/ma_write.c | 8 ++++---- storage/maria/maria_def.h | 10 +++++----- storage/maria/s3_func.c | 2 +- storage/maria/tablockman.c | 2 +- storage/maria/unittest/ma_control_file-t.c | 4 ++-- storage/maria/unittest/ma_pagecache_single.c | 2 +- storage/maria/unittest/sequence_storage.c | 2 +- storage/myisam/ha_myisam.cc | 6 +++--- storage/myisam/mi_check.c | 4 ++-- storage/myisam/mi_extra.c | 2 +- storage/myisam/mi_packrec.c | 2 +- storage/myisam/mi_range.c | 2 +- storage/myisam/mi_search.c | 2 +- storage/myisam/mi_test1.c | 2 +- storage/myisam/myisampack.c | 2 +- storage/myisammrg/ha_myisammrg.cc 
| 4 ++-- storage/myisammrg/ha_myisammrg.h | 2 +- storage/myisammrg/myrg_extra.c | 2 +- storage/oqgraph/oqgraph_shim.h | 2 +- storage/perfschema/ha_perfschema.h | 2 +- storage/perfschema/pfs_global.h | 2 +- storage/perfschema/pfs_program.h | 2 +- storage/perfschema/pfs_server.h | 2 +- storage/perfschema/pfs_timer.h | 2 +- storage/perfschema/pfs_variable.cc | 2 +- ...le_replication_connection_configuration.cc | 2 +- storage/rocksdb/ha_rocksdb.cc | 14 ++++++------- storage/rocksdb/properties_collector.h | 4 ++-- storage/rocksdb/rdb_cf_options.cc | 2 +- storage/rocksdb/rdb_converter.cc | 2 +- storage/rocksdb/rdb_converter.h | 2 +- storage/rocksdb/rdb_datadic.cc | 6 +++--- storage/rocksdb/rdb_mutex_wrapper.h | 2 +- storage/rocksdb/rdb_utils.cc | 4 ++-- storage/rocksdb/rdb_utils.h | 2 +- storage/rocksdb/ut0counter.h | 2 +- storage/spider/ha_spider.cc | 2 +- storage/spider/spd_db_include.h | 2 +- storage/spider/spd_param.cc | 2 +- storage/spider/spd_ping_table.cc | 2 +- 170 files changed, 315 insertions(+), 315 deletions(-) diff --git a/storage/archive/azio.c b/storage/archive/azio.c index 01911b4bfa4..9ca3559821e 100644 --- a/storage/archive/azio.c +++ b/storage/archive/azio.c @@ -215,7 +215,7 @@ int write_header(azio_stream *s) int8store(ptr + AZ_CHECK_POS, (unsigned long long)s->check_point); /* Start of Data Block Index Block */ int8store(ptr + AZ_AUTOINCREMENT_POS, (unsigned long long)s->auto_increment); /* Start of Data Block Index Block */ int4store(ptr+ AZ_LONGEST_POS , s->longest_row); /* Longest row */ - int4store(ptr+ AZ_SHORTEST_POS, s->shortest_row); /* Shorest row */ + int4store(ptr+ AZ_SHORTEST_POS, s->shortest_row); /* Shortest row */ int4store(ptr+ AZ_FRM_POS, AZHEADER_SIZE + AZMETA_BUFFER_SIZE); /* FRM position */ *(ptr + AZ_DIRTY_POS)= (unsigned char)s->dirty; /* Start of Data Block Index Block */ diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index 3a4d3cc0a59..e60ee9db5e5 100644 --- a/storage/archive/ha_archive.cc +++ 
b/storage/archive/ha_archive.cc @@ -41,8 +41,8 @@ We keep a file pointer open for each instance of ha_archive for each read but for writes we keep one open file handle just for that. We flush it - only if we have a read occur. azip handles compressing lots of records - at once much better then doing lots of little records between writes. + only if we have a read occur. azio handles compressing lots of records + at once much better than doing lots of little records between writes. It is possible to not lock on writes but this would then mean we couldn't handle bulk inserts as well (that is if someone was trying to read at the same time since we would want to flush). @@ -60,7 +60,7 @@ At some point a recovery method for such a drastic case needs to be divised. - Locks are row level, and you will get a consistant read. + Locks are row level, and you will get a consistent read. For performance as far as table scans go it is quite fast. I don't have good numbers but locally it has out performed both Innodb and MyISAM. For @@ -1010,7 +1010,7 @@ int ha_archive::write_row(const uchar *buf) temp_auto= table->next_number_field->val_int(); /* - We don't support decremening auto_increment. They make the performance + We don't support decrementing auto_increment. They make the performance just cry. */ if (temp_auto <= share->archive_write.auto_increment && diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h index 19110b3e7a8..497f7c06217 100644 --- a/storage/archive/ha_archive.h +++ b/storage/archive/ha_archive.h @@ -52,7 +52,7 @@ public: /* Version for file format. 
1 - Initial Version (Never Released) - 2 - Stream Compression, seperate blobs, no packing + 2 - Stream Compression, separate blobs, no packing 3 - One stream (row and blobs), with packing */ #define ARCHIVE_VERSION 3 diff --git a/storage/connect/array.h b/storage/connect/array.h index af8e101594a..2f8ba86ab56 100644 --- a/storage/connect/array.h +++ b/storage/connect/array.h @@ -107,7 +107,7 @@ class DllExport ARRAY : public XOBJECT, public CSORT { // Array descblock /* This class is used when constructing the arrays of constants used */ /* for indexing. Its only purpose is to provide a way to sort, reduce */ /* and reorder the arrays of multicolumn indexes as one block. Indeed */ -/* sorting the arrays independantly would break the correspondance of */ +/* sorting the arrays independently would break the correspondence of */ /* column values. */ /***********************************************************************/ class MULAR : public CSORT, public BLOCK { // No need to be an XOBJECT diff --git a/storage/connect/csort.cpp b/storage/connect/csort.cpp index 1e4ba674e23..a1d48587115 100644 --- a/storage/connect/csort.cpp +++ b/storage/connect/csort.cpp @@ -95,7 +95,7 @@ CSORT::CSORT(bool cns, int th, int mth) } // end of CSORT constructor /***********************************************************************/ -/* CSORT intialization. */ +/* CSORT initialization. 
*/ /***********************************************************************/ int CSORT::Qsort(PGLOBAL g, int nb) { diff --git a/storage/connect/filamgz.cpp b/storage/connect/filamgz.cpp index 5b965f63926..8ffc8002f5a 100644 --- a/storage/connect/filamgz.cpp +++ b/storage/connect/filamgz.cpp @@ -339,7 +339,7 @@ int GZFAM::ReadBuffer(PGLOBAL g) *p = '\0'; // Eliminate ending new-line character if (*(--p) == '\r') - *p = '\0'; // Eliminate eventuel carriage return + *p = '\0'; // Eliminate eventual carriage return strcpy(Tdbp->GetLine(), To_Buf); IsRead = true; diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index d449fc1d1c5..5e4526e437b 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -352,7 +352,7 @@ int TXTFAM::StoreValues(PGLOBAL g, bool upd) /* record are not necessarily updated in sequential order. */ /* Moving intermediate lines cannot be done while making them because */ /* this can cause extra wrong records to be included in the new file. */ -/* What we do here is to reorder the updated records and do all the */ +/* What we do here is reorder the updated records and do all the */ /* updates ordered by record position. */ /***********************************************************************/ int TXTFAM::UpdateSortedRows(PGLOBAL g) @@ -402,7 +402,7 @@ err: /***********************************************************************/ /* DeleteSortedRows. When deleting using indexing, the issue is that */ /* record are not necessarily deleted in sequential order. Moving */ -/* intermediate lines cannot be done while deleing them because */ +/* intermediate lines cannot be done while deleting them because */ /* this can cause extra wrong records to be included in the new file. */ /* What we do here is to reorder the deleted record and delete from */ /* the file from the ordered deleted records. 
*/ diff --git a/storage/connect/filter.cpp b/storage/connect/filter.cpp index cddb8b8c526..b1f93b36cc3 100644 --- a/storage/connect/filter.cpp +++ b/storage/connect/filter.cpp @@ -547,7 +547,7 @@ bool FILTER::FindJoinFilter(POPJOIN opj, PFIL fprec, bool teq, bool tek, return (Opc < 0); // Keep only equi-joins and specific joins (Outer and Distinct) - // Normally specific join operators comme first because they have + // Normally specific join operators come first because they have // been placed first by SortJoin. if (teq && Opc > OP_EQ) return FALSE; @@ -747,7 +747,7 @@ bool FILTER::CheckHaving(PGLOBAL g, PSQL sqlp) return FALSE; default: if (CheckColumn(g, sqlp, xp, agg) < -1) - return TRUE; // Unrecovable error + return TRUE; // Unrecoverable error break; } // endswitch Opc diff --git a/storage/connect/fmdlex.c b/storage/connect/fmdlex.c index 165913a9698..54d990e7bdb 100644 --- a/storage/connect/fmdlex.c +++ b/storage/connect/fmdlex.c @@ -845,7 +845,7 @@ static int yy_get_next_buffer() { /* Don't try to fill the buffer, so this is an EOF. */ if ( yy_c_buf_p - yytext_ptr - YY_MORE_ADJ == 1 ) { - /* We matched a singled characater, the EOB, so + /* We matched a singled character, the EOB, so * treat this as a final EOF. 
*/ return EOB_ACT_END_OF_FILE; diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 3757d0d1c03..3ff3121810b 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -4990,7 +4990,7 @@ int ha_connect::external_lock(THD *thd, int lock_type) } // endelse Xchk if (CloseTable(g)) { - // This is an error while builing index + // This is an error while building index // Make it a warning to avoid crash push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, g->Message); rc= 0; diff --git a/storage/connect/ha_connect.h b/storage/connect/ha_connect.h index 3bac15bf426..cbef6ef0338 100644 --- a/storage/connect/ha_connect.h +++ b/storage/connect/ha_connect.h @@ -519,7 +519,7 @@ protected: char *GetDBfromName(const char *name); // Members - static ulong num; // Tracable handler number + static ulong num; // Traceable handler number PCONNECT xp; // To user_connect associated class ulong hnum; // The number of this handler query_id_t valid_query_id; // The one when tdbp was allocated diff --git a/storage/connect/inihandl.cpp b/storage/connect/inihandl.cpp index cd06f7fadd7..377c668ac0d 100644 --- a/storage/connect/inihandl.cpp +++ b/storage/connect/inihandl.cpp @@ -1318,7 +1318,7 @@ BOOL WritePrivateProfileSection(LPCSTR section, * - note that this means if the buffer was to small to return even just * the first section name then a single '\0' will be returned. 
* - the return value is the number of characters written in the buffer, - * except if the buffer was too smal in which case len-2 is returned + * except if the buffer was too small in which case len-2 is returned * * Win2000: * - if the buffer is 0, 1 or 2 characters long then it is filled with diff --git a/storage/connect/odbconn.cpp b/storage/connect/odbconn.cpp index 520b82d51c2..a78257087ff 100644 --- a/storage/connect/odbconn.cpp +++ b/storage/connect/odbconn.cpp @@ -281,7 +281,7 @@ static CATPARM *AllocCatInfo(PGLOBAL g, CATINFO fid, PCSZ db, cap->Status = (UWORD *)PlugSubAlloc(g, NULL, m * sizeof(UWORD)); } catch (int n) { - htrc("Exeption %d: %s\n", n, g->Message); + htrc("Exception %d: %s\n", n, g->Message); cap = NULL; } catch (const char *msg) { htrc(g->Message, msg); diff --git a/storage/connect/plgcnx.h b/storage/connect/plgcnx.h index 1b341bc5275..5f46dc8e404 100644 --- a/storage/connect/plgcnx.h +++ b/storage/connect/plgcnx.h @@ -17,7 +17,7 @@ /**************************************************************************/ enum FNRC {RC_LICENSE = 7, /* PLGConnect prompt for license key */ RC_PASSWD = 6, /* PLGConnect prompt for User/Pwd */ - RC_SUCWINFO = 5, /* Succes With Info return code */ + RC_SUCWINFO = 5, /* Success With Info return code */ RC_SOCKET = 4, /* RC from PLGConnect to socket DLL */ RC_PROMPT = 3, /* Intermediate prompt return */ RC_CANCEL = 2, /* Command was cancelled by user */ @@ -25,7 +25,7 @@ enum FNRC {RC_LICENSE = 7, /* PLGConnect prompt for license key */ RC_SUCCESS = 0, /* Successful function (must be 0) */ RC_MEMORY = -1, /* Storage allocation error */ RC_TRUNCATED = -2, /* Result has been truncated */ - RC_TIMEOUT = -3, /* Connection timeout occurred */ + RC_TIMEOUT = -3, /* Connection timeout occurred */ RC_TOOBIG = -4, /* Data is too big for connection */ RC_KEY = -5, /* Null ptr to key in Connect */ /* or bad key in other functions */ diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h index 
4371f90a21d..4b709154262 100644 --- a/storage/connect/plgdbsem.h +++ b/storage/connect/plgdbsem.h @@ -535,7 +535,7 @@ enum XFLD {FLD_NO = 0, /* Not a field definition item */ FLD_KEY = 11, /* Field key property */ FLD_DEFAULT = 12, /* Field default value */ FLD_EXTRA = 13, /* Field extra info */ - FLD_PRIV = 14, /* Field priviledges */ + FLD_PRIV = 14, /* Field privileges */ FLD_DATEFMT = 15, /* Field date format */ FLD_FORMAT = 16, /* Field format */ FLD_CAT = 17, /* Table catalog */ diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp index 68641129dc9..21b9ad71465 100644 --- a/storage/connect/plgdbutl.cpp +++ b/storage/connect/plgdbutl.cpp @@ -496,17 +496,17 @@ bool PlugEvalLike(PGLOBAL g, LPCSTR strg, LPCSTR pat, bool ci) /* */ /* The Like predicate is true if: */ /* */ -/* 1- A subtring of M is a sequence of 0 or more contiguous of M */ +/* 1- A substring of M is a sequence of 0 or more contiguous of M*/ /* and each of M is part of exactly one substring. */ /* */ -/* 2- If the i-th of P is an , the i-th subtring of M is any single . */ +/* 2- If the i-th of P is an , the i-th substring of M is any single . */ /* */ -/* 3- If the i-th of P is an , then the i-th subtring of M is any sequence of zero */ +/* 3- If the i-th of P is an , then the i-th substring of M is any sequence of zero*/ /* or more . */ /* */ -/* 4- If the i-th of P is neither an of P is neither an nor an , then */ /* the i-th substring of M is equal to that */ /* according to the collating sequence of the , */ @@ -514,7 +514,7 @@ bool PlugEvalLike(PGLOBAL g, LPCSTR strg, LPCSTR pat, bool ci) /* length as that . */ /* */ /* 5- The number of substrings of M is equal to the number of */ -/* of P. */ +/* of P. */ /* */ /* Otherwise M like P is false. */ /***********************************************************************/ @@ -572,7 +572,7 @@ bool EvalLikePattern(LPCSTR sp, LPCSTR tp) b = (t || !*sp); /* true if % or void strg. 
*/ else if (!t) { /*******************************************************************/ - /* No character to skip, check occurrence of */ + /* No character to skip, check occurrence of */ /* at the very beginning of remaining string. */ /*******************************************************************/ if (p) { @@ -586,7 +586,7 @@ bool EvalLikePattern(LPCSTR sp, LPCSTR tp) if (p) /*****************************************************************/ /* Here is the case explaining why we need a recursive routine. */ - /* The test must be done not only against the first occurrence */ + /* The test must be done not only against the first occurrence */ /* of the in the remaining string, */ /* but also with all eventual succeeding ones. */ /*****************************************************************/ @@ -1080,7 +1080,7 @@ DllExport PSZ GetIniString(PGLOBAL g, void *mp, LPCSTR sec, LPCSTR key, #endif // 0 /***********************************************************************/ -/* GetAmName: return the name correponding to an AM code. */ +/* GetAmName: return the name corresponding to an AM code. */ /***********************************************************************/ char *GetAmName(PGLOBAL g, AMT am, void *memp) { diff --git a/storage/connect/plgxml.cpp b/storage/connect/plgxml.cpp index d78ba120ebc..38fabfb1984 100644 --- a/storage/connect/plgxml.cpp +++ b/storage/connect/plgxml.cpp @@ -171,7 +171,7 @@ void XMLNODE::Delete(PXNODE dnp) } // end of Delete /******************************************************************/ -/* Store a string in Buf, enventually reallocating it. */ +/* Store a string in Buf, eventually reallocating it. 
*/ /******************************************************************/ char *XMLNODE::BufAlloc(PGLOBAL g, const char *p, int n) { diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index 9af9faa333a..f8aef64496e 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -446,7 +446,7 @@ int TABDEF::GetColCatInfo(PGLOBAL g) //case RECFM_OCCUR: //case RECFM_PRX: case RECFM_OEM: - poff = 0; // Offset represents an independant flag + poff = 0; // Offset represents an independent flag break; default: // PLG ODBC JDBC MYSQL WMI... poff = 0; // NA diff --git a/storage/connect/reldef.h b/storage/connect/reldef.h index 64f3a5e0022..0f4dcb8e60c 100644 --- a/storage/connect/reldef.h +++ b/storage/connect/reldef.h @@ -152,14 +152,14 @@ class DllExport OEMDEF : public TABDEF { /* OEM table */ void *Hdll; /* Handle for the loaded shared library */ #endif // !_WIN32 PTABDEF Pxdef; /* Pointer to the external TABDEF class */ - char *Module; /* Path/Name of the DLL implenting it */ + char *Module; /* Path/Name of the DLL implementing it */ char *Subtype; /* The name of the OEM table sub type */ }; // end of OEMDEF /***********************************************************************/ /* Column definition block used during creation. */ /***********************************************************************/ -class DllExport COLCRT : public BLOCK { /* Column description block */ +class DllExport COLCRT : public BLOCK { /* Column description block */ friend class TABDEF; public: COLCRT(PSZ name); // Constructor diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index 0fdc182f6df..8bf7ce9bc6c 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -186,7 +186,7 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int) } // end of DefineAM /***********************************************************************/ -/* Get the full path/name of the optization file. */ +/* Get the full path/name of the optimization file. 
*/ /***********************************************************************/ bool DOSDEF::GetOptFileName(PGLOBAL g, char *filename) { @@ -210,7 +210,7 @@ bool DOSDEF::GetOptFileName(PGLOBAL g, char *filename) } // end of GetOptFileName /***********************************************************************/ -/* After an optimize error occurred, remove all set optimize values. */ +/* After an optimize error occurred, remove all set optimize values. */ /***********************************************************************/ void DOSDEF::RemoveOptValues(PGLOBAL g) { @@ -1619,7 +1619,7 @@ void TDBDOS::ResetBlockFilter(PGLOBAL g) /* RC_OK: if some records in the block can meet filter criteria. */ /* RC_NF: if no record in the block can meet filter criteria. */ /* RC_EF: if no record in the remaining file can meet filter criteria.*/ -/* In addition, temporarily supress filtering if all the records in */ +/* In addition, temporarily suppress filtering if all the records in */ /* the block meet filter criteria. */ /***********************************************************************/ int TDBDOS::TestBlock(PGLOBAL g) diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index 443d5a9c87f..b05e0d07a1e 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ -1219,7 +1219,7 @@ PCOL TDBFMT::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) /***********************************************************************/ /* FMT EstimatedLength. Returns an estimated minimum line length. */ -/* The big problem here is how can we astimated that minimum ? */ +/* The big problem here is how can we estimate that minimum ? 
*/ /***********************************************************************/ int TDBFMT::EstimatedLength(void) { diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp index 0242832b02f..07ee4e64c8e 100644 --- a/storage/connect/tabjdbc.cpp +++ b/storage/connect/tabjdbc.cpp @@ -289,7 +289,7 @@ PTDB JDBCDEF::GetTable(PGLOBAL g, MODE m) /* containing the entire result of the executed query. This can be an */ /* issue for big tables and memory error can occur. An alternative is */ /* to use streaming (reading one row at a time) but to specify this, */ -/* a fech size of the integer min value must be send to the driver. */ +/* a fetch size of the integer min value must be send to the driver. */ /***********************************************************************/ int JDBCPARM::CheckSize(int rows) { diff --git a/storage/connect/tabmul.cpp b/storage/connect/tabmul.cpp index 7e2857f17a0..70e09ab81bf 100644 --- a/storage/connect/tabmul.cpp +++ b/storage/connect/tabmul.cpp @@ -676,7 +676,7 @@ char* TDBDIR::Path(PGLOBAL g) PlugSetPath(Fpath, To_File, defp ? 
defp->GetPath() : NULL); _splitpath(Fpath, Drive, Direc, Fname, Ftype); } else - _makepath(Fpath, Drive, Direc, Fname, Ftype); // Usefull for TDBSDR + _makepath(Fpath, Drive, Direc, Fname, Ftype); // Useful for TDBSDR return Fpath; #else // !_WIN32 diff --git a/storage/connect/tabpivot.cpp b/storage/connect/tabpivot.cpp index da5885f9914..d43e93ae7eb 100644 --- a/storage/connect/tabpivot.cpp +++ b/storage/connect/tabpivot.cpp @@ -265,7 +265,7 @@ PQRYRES PIVAID::MakePivotColumns(PGLOBAL g) ndif = qrp->Nblin; } // endif Tabsrc - // Allocate the Value used to retieve column names + // Allocate the Value used to retrieve column names if (!(valp = AllocateValue(g, Rblkp->GetType(), Rblkp->GetVlen(), Rblkp->GetPrec()))) diff --git a/storage/connect/tabrest.cpp b/storage/connect/tabrest.cpp index e75e2006905..cb8135d7d72 100644 --- a/storage/connect/tabrest.cpp +++ b/storage/connect/tabrest.cpp @@ -95,7 +95,7 @@ int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename) char fn[600]; pid_t pID; - // Check if curl package is availabe by executing subprocess + // Check if curl package is available by executing subprocess FILE *f= popen("command -v curl", "r"); if (!f) { diff --git a/storage/connect/tabvct.cpp b/storage/connect/tabvct.cpp index f5710688d3c..9d22d135889 100644 --- a/storage/connect/tabvct.cpp +++ b/storage/connect/tabvct.cpp @@ -290,7 +290,7 @@ PCOL TDBVCT::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) /***********************************************************************/ bool TDBVCT::IsUsingTemp(PGLOBAL) { - // For developpers + // For developers return (UseTemp() == TMP_TEST); } // end of IsUsingTemp diff --git a/storage/connect/tabwmi.cpp b/storage/connect/tabwmi.cpp index 1cd46a7442c..69ef529bf5c 100644 --- a/storage/connect/tabwmi.cpp +++ b/storage/connect/tabwmi.cpp @@ -599,7 +599,7 @@ int TDBWMI::GetMaxSize(PGLOBAL g) /*******************************************************************/ /* Loop enumerating to get the count. 
This is prone to last a */ /* very long time for some classes such as DataFile, this is why */ - /* we just return an estimated value that will be ajusted later. */ + /* we just return an estimated value that will be adjusted later. */ /*******************************************************************/ MaxSize = Ems; #if 0 @@ -619,7 +619,7 @@ int TDBWMI::GetMaxSize(PGLOBAL g) break; MaxSize++; - } // endwile Enumerator + } // endwhile Enumerator Res = Enumerator->Reset(); #endif // 0 diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp index d5302759bc4..0ce0f4fd836 100644 --- a/storage/connect/tabxml.cpp +++ b/storage/connect/tabxml.cpp @@ -382,7 +382,7 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) xcp->Found = false; } // endfor xcp - } // endor i + } // endfor i txmp->CloseDB(g); @@ -818,7 +818,7 @@ int TDBXML::LoadTableFile(PGLOBAL g, char *filename) /***********************************************************************/ /* Initialize the processing of the XML file. 
*/ -/* Note: this function can be called several times, eventally before */ +/* Note: this function can be called several times, eventually before */ /* the columns are known (from TBL for instance) */ /***********************************************************************/ bool TDBXML::Initialize(PGLOBAL g) diff --git a/storage/connect/unzip.c b/storage/connect/unzip.c index de69f4b62eb..6c620fb14cd 100644 --- a/storage/connect/unzip.c +++ b/storage/connect/unzip.c @@ -53,8 +53,8 @@ Oct-2009 - Mathias Svensson - Fixed problem if uncompressed size was > 4G and compressed size was <4G should only read the compressed/uncompressed size from the Zip64 format if the size from normal header was 0xFFFFFFFF - Oct-2009 - Mathias Svensson - Applied some bug fixes from paches recived from Gilles Vollant - Oct-2009 - Mathias Svensson - Applied support to unzip files with compression mathod BZIP2 (bzip2 lib is required) + Oct-2009 - Mathias Svensson - Applied some bug fixes from patches received from Gilles Vollant + Oct-2009 - Mathias Svensson - Applied support to unzip files with compression method BZIP2 (bzip2 lib is required) Patch created by Daniel Borca Jan-2010 - back to unzip and minizip 1.0 name scheme, with compatibility layer @@ -847,7 +847,7 @@ extern int ZEXPORT unzGetGlobalInfo (unzFile file, unz_global_info* pglobal_info return UNZ_OK; } /* - Translate date/time from Dos format to tm_unz (readable more easilty) + Translate date/time from Dos format to tm_unz (readable more easily) */ local void unz64local_DosDateToTmuDate (ZPOS64_T ulDosDate, tm_unz* ptm) { diff --git a/storage/connect/user_connect.cc b/storage/connect/user_connect.cc index 4b179ef55d0..2b27901e03c 100644 --- a/storage/connect/user_connect.cc +++ b/storage/connect/user_connect.cc @@ -20,8 +20,8 @@ Implements the user_connect class. @details - To support multi_threading, each query creates and use a PlugDB "user" - that is a connection with its personnal memory allocation. 
+ To support multi_threading, each query creates and uses a PlugDB "user" + that is a connection with its private memory allocation. @note Author Olivier Bertrand diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index e64c4813046..28459383e07 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -1775,7 +1775,7 @@ DECVAL::DECVAL(PGLOBAL g, PSZ s, int n, int prec, bool uns) } // end of DECVAL constructor /***********************************************************************/ -/* DECIMAL: Check whether the numerica value is equal to 0. */ +/* DECIMAL: Check whether the numerical value is equal to 0. */ /***********************************************************************/ bool DECVAL::IsZero(void) { diff --git a/storage/connect/xindex.h b/storage/connect/xindex.h index dcd98582e2d..a1aa06d6793 100644 --- a/storage/connect/xindex.h +++ b/storage/connect/xindex.h @@ -40,7 +40,7 @@ typedef struct index_val : public BLOCK { index_val(PXOB xp) {Next = NULL; Xval = xp; Kp = NULL;} PIVAL Next; // Next value PXOB Xval; // To value or array - int *Kp; // The coordonates in a LSTBLK + int *Kp; // The coordinates in a LSTBLK } IVAL; typedef struct index_col : public BLOCK { diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index 4eff11f0c36..046af7d6283 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -299,7 +299,7 @@ error: DESCRIPTION Read the meta-file info. For now we are only interested in - rows counf, crashed bit and magic number. + rows count, crashed bit and magic number. RETURN 0 - OK @@ -1006,7 +1006,7 @@ int ha_tina::open(const char *name, int mode, uint open_options) /* - Close a database file. We remove ourselves from the shared strucutre. + Close a database file. We remove ourselves from the shared structure. If it is empty we destroy it. 
*/ int ha_tina::close(void) @@ -1292,7 +1292,7 @@ void ha_tina::position(const uchar *record) /* - Used to fetch a row from a posiion stored with ::position(). + Used to fetch a row from a position stored with ::position(). my_get_ptr() retrieves the data for you. */ @@ -1397,7 +1397,7 @@ int ha_tina::rnd_end() /* The sort is needed when there were updates/deletes with random orders. - It sorts so that we move the firts blocks to the beginning. + It sorts so that we move the first blocks to the beginning. */ my_qsort(chain, (size_t)(chain_ptr - chain), sizeof(tina_set), (qsort_cmp)sort_set); diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index aded8987076..b8423080753 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -284,7 +284,7 @@ ------- There is a test for MySQL Federated Storage Handler in ./mysql-test/t, - federatedd.test It starts both a slave and master database using + federated.test It starts both a slave and master database using the same setup that the replication tests use, with the exception that it turns off replication, and sets replication to ignore the test tables. After ensuring that you actually do have support for the federated storage @@ -3268,7 +3268,7 @@ bool ha_federated::get_error_message(int error, String* buf) @details Call @c mysql_store_result() to save a result set then append it to the stored results array. - @param[in] mysql_arg MySLQ connection structure. + @param[in] mysql_arg MySQL connection structure. @return Stored result set (MYSQL_RES object). */ diff --git a/storage/federated/ha_federated.h b/storage/federated/ha_federated.h index bf9b0662ee1..bfdb78af0dc 100644 --- a/storage/federated/ha_federated.h +++ b/storage/federated/ha_federated.h @@ -198,7 +198,7 @@ public: } const key_map *keys_to_use_for_scanning() override { return &key_map_full; } /* - Everything below are methods that we implment in ha_federated.cc. 
+ Everything below are methods that we implement in ha_federated.cc. Most of these methods are not obligatory, skip them and MySQL will treat them as not implemented diff --git a/storage/heap/hp_create.c b/storage/heap/hp_create.c index f35e8e3fac9..20a099d4159 100644 --- a/storage/heap/hp_create.c +++ b/storage/heap/hp_create.c @@ -24,8 +24,8 @@ static void init_block(HP_BLOCK *block, size_t reclength, ulong min_records, /* In how many parts are we going to do allocations of memory and indexes - If we assigne 1M to the heap table memory, we will allocate roughly - (1M/16) bytes per allocaiton + If we assign 1M to the heap table memory, we will allocate roughly + (1M/16) bytes per allocation */ static const int heap_allocation_parts= 16; @@ -361,7 +361,7 @@ static void init_block(HP_BLOCK *block, size_t reclength, ulong min_records, block->records_in_block= records_in_block; block->recbuffer= recbuffer; block->last_allocated= 0L; - /* All alloctions are done with this size, if possible */ + /* All allocations are done with this size, if possible */ block->alloc_size= alloc_size - MALLOC_OVERHEAD; for (i= 0; i <= HP_MAX_LEVELS; i++) diff --git a/storage/heap/hp_hash.c b/storage/heap/hp_hash.c index e00de4c835a..5e31c2c4557 100644 --- a/storage/heap/hp_hash.c +++ b/storage/heap/hp_hash.c @@ -375,7 +375,7 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const uchar *rec) RETURN 0 Key is identical - <> 0 Key differes + <> 0 Key differs */ int hp_rec_key_cmp(HP_KEYDEF *keydef, const uchar *rec1, const uchar *rec2) diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index 5b81adde307..49236a173d2 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -67,7 +67,7 @@ either in S (shared) or X (exclusive) mode and block->lock was not acquired on node pointer pages. 
After MariaDB 10.2.2, block->lock S-latch or X-latch is used to protect -node pointer pages and obtaiment of node pointer page latches is protected by +node pointer pages and obtainment of node pointer page latches is protected by index->lock. (0) Definition: B-tree level. @@ -130,7 +130,7 @@ NOTE: New rules after MariaDB 10.2.2 does not affect the latching rules of leaf index->lock S-latch is needed in read for the node pointer traversal. When the leaf level is reached, index-lock can be released (and with the MariaDB 10.2.2 changes, all -node pointer latches). Left to right index travelsal in leaf page level can be safely done +node pointer latches). Left to right index traversal in leaf page level can be safely done by obtaining right sibling leaf page latch and then releasing the old page latch. Single leaf page modifications (BTR_MODIFY_LEAF) are protected by index->lock diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index d34656abc2b..50453ba435d 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -5639,7 +5639,7 @@ static void btr_blob_free(buf_block_t *block, bool all, mtr_t *mtr) if (!buf_LRU_free_page(&block->page, all) && all && block->page.zip.data) /* Attempt to deallocate the redundant copy of the uncompressed page - if the whole ROW_FORMAT=COMPRESSED block cannot be deallocted. */ + if the whole ROW_FORMAT=COMPRESSED block cannot be deallocated. 
*/ buf_LRU_free_page(&block->page, false); mysql_mutex_unlock(&buf_pool.mutex); diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 5d61b5f55c9..f204e7d3a7e 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -971,7 +971,7 @@ ATTRIBUTE_COLD void buf_mem_pressure_shutdown() #if defined(DBUG_OFF) && defined(HAVE_MADVISE) && defined(MADV_DODUMP) /** Enable buffers to be dumped to core files -A convience function, not called anyhwere directly however +A convenience function, not called anywhere directly however it is left available for gdb or any debugger to call in the event that you want all of the memory to be dumped to a core file. @@ -2841,7 +2841,7 @@ loop: well as error handling takes place at a lower level. Here we only need to know whether the page really is corrupted, or if an encrypted page with a valid - checksum cannot be decypted. */ + checksum cannot be decrypted. */ switch (dberr_t local_err = buf_read_page(page_id, chain)) { case DB_SUCCESS: diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc index 51ccadd2612..9978a9640c7 100644 --- a/storage/innobase/buf/buf0dblwr.cc +++ b/storage/innobase/buf/buf0dblwr.cc @@ -19,7 +19,7 @@ this program; if not, write to the Free Software Foundation, Inc., /**************************************************//** @file buf/buf0dblwr.cc -Doublwrite buffer module +Doublewrite buffer module Created 2011/12/19 *******************************************************/ diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 6523d227de2..13cf51cb60e 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -126,7 +126,7 @@ void buf_pool_t::page_cleaner_wakeup(bool for_LRU) noexcept /* if pct_lwm != 0.0, adaptive flushing is enabled. 
signal buf page cleaner thread - - if pct_lwm <= dirty_pct then it will invoke apdative flushing flow + - if pct_lwm <= dirty_pct then it will invoke adaptive flushing flow - if pct_lwm > dirty_pct then it will invoke idle flushing flow. idle_flushing: @@ -2221,7 +2221,7 @@ static void buf_flush_sync_for_checkpoint(lsn_t lsn) noexcept mysql_mutex_unlock(&buf_pool.flush_list_mutex); } -/** Check if the adpative flushing threshold is recommended based on +/** Check if the adaptive flushing threshold is recommended based on redo log capacity filled threshold. @param oldest_lsn buf_pool.get_oldest_modification() @return true if adaptive flushing is recommended. */ diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index a1d354ca3b6..0bb86b4faa9 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -880,7 +880,7 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index, n_sample_pages = srv_stats_transient_sample_pages; } } else { - /* New logaritmic number of pages that are estimated. + /* New logarithmic number of pages that are estimated. Number of pages estimated should be between 1 and index->stat_index_size. diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 2c64a6b7b10..363275452d6 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -456,7 +456,7 @@ static bool fil_node_open_file(fil_node_t *node, const byte *page, bool no_lsn) } } - /* The node can be opened beween releasing and acquiring fil_system.mutex + /* The node can be opened between releasing and acquiring fil_system.mutex in the above code */ return node->is_open() || fil_node_open_file_low(node, page, no_lsn); } @@ -2162,7 +2162,7 @@ func_exit: df_remote.init(flags); /* Discover the correct file by looking in three possible locations - while avoiding unecessary effort. */ + while avoiding unnecessary effort. 
*/ /* We will always look for an ibd in the default location. */ df_default.make_filepath(nullptr, name, IBD); @@ -2501,7 +2501,7 @@ bool fil_crypt_check(fil_space_crypt_t *crypt_data, const char *f_name) /** Open an ibd tablespace and add it to the InnoDB data structures. This is similar to fil_ibd_open() except that it is used while processing the REDO log, so the data dictionary is not available and very little -validation is done. The tablespace name is extracred from the +validation is done. The tablespace name is extracted from the dbname/tablename.ibd portion of the filename, which assumes that the file is a file-per-table tablespace. Any name will do for now. General tablespace names will be read from the dictionary after it has been diff --git a/storage/innobase/fil/fil0pagecompress.cc b/storage/innobase/fil/fil0pagecompress.cc index baa4aca1b71..8333ef76ae1 100644 --- a/storage/innobase/fil/fil0pagecompress.cc +++ b/storage/innobase/fil/fil0pagecompress.cc @@ -292,7 +292,7 @@ static ulint fil_page_compress_for_non_full_crc32( mach_write_to_2(out_buf + FIL_PAGE_TYPE, FIL_PAGE_PAGE_COMPRESSED); } - /* Set up the actual payload lenght */ + /* Set up the actual payload length */ mach_write_to_2(out_buf + FIL_PAGE_DATA + FIL_PAGE_COMP_SIZE, write_size); diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc index cc55ddd66cc..8ad37304377 100644 --- a/storage/innobase/fsp/fsp0fsp.cc +++ b/storage/innobase/fsp/fsp0fsp.cc @@ -1535,7 +1535,7 @@ MY_ATTRIBUTE((nonnull(1,4,5), warn_unused_result)) @param[out] block inode block @param[out] err error code @return segment inode, page x-latched -@retrval nullptr if the inode is free or corruption was noticed */ +@retval nullptr if the inode is free or corruption was noticed */ static fseg_inode_t* fseg_inode_try_get( @@ -2075,7 +2075,7 @@ take_hinted_page: } /** If the number of unused but reserved pages in a segment is - esser than minimum value of 1/8 of reserved pages or + less than minimum 
value of 1/8 of reserved pages or 4 * FSP_EXTENT_SIZE and there are at least half of extent size used pages, then we allow a new empty extent to be added to the segment in fseg_alloc_free_page_general(). Otherwise, we use @@ -3306,7 +3306,7 @@ func_exit: { /* Update the FLST_LAST pointer in base node with current valid extent descriptor and mark the FIL_NULL as next in - current extent descriptr */ + current extent descriptor */ flst_write_addr( *header, header->page.frame + hdr_offset + FLST_LAST, diff --git a/storage/innobase/fsp/fsp0sysspace.cc b/storage/innobase/fsp/fsp0sysspace.cc index 7548ac97edc..ac97923291b 100644 --- a/storage/innobase/fsp/fsp0sysspace.cc +++ b/storage/innobase/fsp/fsp0sysspace.cc @@ -953,9 +953,9 @@ SysTablespace::open_or_create( } } - /* Close the curent handles, add space and file info to the + /* Close the current handles, add space and file info to the fil_system cache and the Data Dictionary, and re-open them - in file_system cache so that they stay open until shutdown. */ + in fil_system cache so that they stay open until shutdown. 
*/ mysql_mutex_lock(&fil_system.mutex); ulint node_counter = 0; for (files_t::iterator it = begin; it != end; ++it) { diff --git a/storage/innobase/fts/fts0ast.cc b/storage/innobase/fts/fts0ast.cc index 74d02d63817..dd8a47e992a 100644 --- a/storage/innobase/fts/fts0ast.cc +++ b/storage/innobase/fts/fts0ast.cc @@ -688,7 +688,7 @@ fts_ast_visit( continue; } - /* Process leaf node accroding to its pass.*/ + /* Process leaf node according to its pass.*/ if (oper == FTS_EXIST_SKIP && visit_pass == FTS_PASS_EXIST) { error = visitor(FTS_EXIST, node, arg); diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index 5bf9548f95c..4e2b23a4e87 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -177,7 +177,7 @@ static const char* fts_config_table_insert_values_sql = FTS_TABLE_STATE "', '0');\n" "END;\n"; -/** FTS tokenize parmameter for plugin parser */ +/** FTS tokenize parameter for plugin parser */ struct fts_tokenize_param_t { fts_doc_t* result_doc; /*!< Result doc for tokens */ ulint add_pos; /*!< Added position for tokens */ @@ -2032,7 +2032,7 @@ fts_create_one_index_table( FTS_INDEX_DOC_COUNT_LEN); /* The precise type calculation is as follows: - least signficiant byte: MySQL type code (not applicable for sys cols) + least significant byte: MySQL type code (not applicable for sys cols) second least : DATA_NOT_NULL | DATA_BINARY_TYPE third least : the MySQL charset-collation code (DATA_MTYPE_MAX) */ @@ -4423,7 +4423,7 @@ or greater than fts_max_token_size. 
@param[in] stopwords stopwords rb tree @param[in] cs token charset @retval true if it is not stopword and length in range -@retval false if it is stopword or lenght not in range */ +@retval false if it is stopword or length not in range */ bool fts_check_token( const fts_string_t* token, diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc index 38b45a94142..af4b9eed2a4 100644 --- a/storage/innobase/fts/fts0opt.cc +++ b/storage/innobase/fts/fts0opt.cc @@ -117,10 +117,10 @@ struct fts_zip_t { fts_string_t word; /*!< UTF-8 string */ ulint max_words; /*!< maximum number of words to read - in one pase */ + in one pass */ }; -/** Prepared statemets used during optimize */ +/** Prepared statements used during optimize */ struct fts_optimize_graph_t { /*!< Delete a word from FTS INDEX */ que_t* delete_nodes_graph; @@ -172,7 +172,7 @@ struct fts_optimize_t { ulint n_completed; /*!< Number of FTS indexes that have been optimized */ ibool del_list_regenerated; - /*!< BEING_DELETED list regenarated */ + /*!< BEING_DELETED list regenerated */ }; /** Used by the optimize, to keep state during compacting nodes. */ @@ -2245,7 +2245,7 @@ fts_optimize_read_deleted_doc_id_snapshot( } /*********************************************************************//** -Optimze all the FTS indexes, skipping those that have already been +Optimize all the FTS indexes, skipping those that have already been optimized, since the FTS auxiliary indexes are not guaranteed to be of the same cardinality. 
@return DB_SUCCESS if all OK */ diff --git a/storage/innobase/fts/fts0que.cc b/storage/innobase/fts/fts0que.cc index 385685e44f8..191678db781 100644 --- a/storage/innobase/fts/fts0que.cc +++ b/storage/innobase/fts/fts0que.cc @@ -199,7 +199,7 @@ struct fts_proximity_t { of the range */ }; -/** The match positions and tokesn to match */ +/** The match positions and tokens to match */ struct fts_phrase_t { fts_phrase_t(const dict_table_t* table) : @@ -244,14 +244,14 @@ struct fts_phrase_t { st_mysql_ftparser* parser; }; -/** Paramter passed to fts phrase match by parser */ +/** Parameter passed to fts phrase match by parser */ struct fts_phrase_param_t { fts_phrase_t* phrase; /*!< Match phrase instance */ ulint token_index; /*!< Index of token to match next */ mem_heap_t* heap; /*!< Heap for word processing */ }; -/** For storing the frequncy of a word/term in a document */ +/** For storing the frequency of a word/term in a document */ struct fts_doc_freq_t { doc_id_t doc_id; /*!< Document id */ ulint freq; /*!< Frequency of a word in a document */ @@ -433,7 +433,7 @@ fts_query_lcs( /* Traverse the table backwards, from the last row to the first and also from the last column to the first. We compute the smaller - common subsequeces first, then use the caluclated values to determine + common subsequences first, then use the calculated values to determine the longest common subsequence. The result will be in TABLE[0][0]. */ for (i = r; i >= 0; --i) { int j; @@ -762,7 +762,7 @@ fts_query_remove_doc_id( } /*******************************************************************//** -Find the doc id in the query set but not in the deleted set, artificialy +Find the doc id in the query set but not in the deleted set, artificially downgrade or upgrade its ranking by a value and make/initialize its ranking under or above its normal range 0 to 1. 
This is used for Boolean Search operator such as Negation operator, which makes word's contribution to the @@ -822,7 +822,7 @@ fts_query_intersect_doc_id( 2. 'a +b': docs match 'a' is in doc_ids, add doc into intersect if it matches 'b'. if the doc is also in doc_ids, then change the doc's rank, and add 'a' in doc's words. - 3. '+a +b': docs matching '+a' is in doc_ids, add doc into intsersect + 3. '+a +b': docs matching '+a' is in doc_ids, add doc into intersect if it matches 'b' and it's in doc_ids.(multi_exist = true). */ /* Check if the doc id is deleted and it's in our set */ @@ -1439,7 +1439,7 @@ fts_query_union( /* The size can't decrease. */ ut_a(rbt_size(query->doc_ids) >= n_doc_ids); - /* Calulate the number of doc ids that were added to + /* Calculate the number of doc ids that were added to the current doc id set. */ if (query->doc_ids) { n_doc_ids = rbt_size(query->doc_ids) - n_doc_ids; @@ -2688,7 +2688,7 @@ fts_query_phrase_split( cache->stopword_info.cached_stopword, query->fts_index_table.charset)) { /* Add the word to the RB tree so that we can - calculate it's frequencey within a document. */ + calculate its frequency within a document. */ fts_query_add_word_freq(query, token); } else { ib_vector_pop(tokens); @@ -3385,7 +3385,7 @@ fts_query_read_node( /* Start from 1 since the first column has been read by the caller. Also, we rely on the order of the columns projected, to filter out ilists that are out of range and we always want to read - the doc_count irrespective of the suitablility of the row. */ + the doc_count irrespective of the suitability of the row. */ for (i = 1; exp && !skip; exp = que_node_get_next(exp), ++i) { diff --git a/storage/innobase/fts/fts0sql.cc b/storage/innobase/fts/fts0sql.cc index 1970f6f584f..781d15f2bef 100644 --- a/storage/innobase/fts/fts0sql.cc +++ b/storage/innobase/fts/fts0sql.cc @@ -115,7 +115,7 @@ Parse an SQL string. 
que_t* fts_parse_sql( /*==========*/ - fts_table_t* fts_table, /*!< in: FTS auxiliarry table info */ + fts_table_t* fts_table, /*!< in: FTS auxiliary table info */ pars_info_t* info, /*!< in: info struct, or NULL */ const char* sql) /*!< in: SQL string to evaluate */ { diff --git a/storage/innobase/gis/gis0rtree.cc b/storage/innobase/gis/gis0rtree.cc index 2c7db6f5f5d..3ee8910267c 100644 --- a/storage/innobase/gis/gis0rtree.cc +++ b/storage/innobase/gis/gis0rtree.cc @@ -74,7 +74,7 @@ rtr_page_split_initialize_nodes( n_recs = ulint(page_get_n_recs(page)) + 1; /*We reserve 2 MBRs memory space for temp result of split - algrithm. And plus the new mbr that need to insert, we + algorithm. And plus the new mbr that need to insert, we need (n_recs + 3)*MBR size for storing all MBRs.*/ buf = static_cast(mem_heap_alloc( heap, DATA_MBR_LEN * (n_recs + 3) @@ -277,7 +277,7 @@ rtr_update_mbr_field( ins_suc = false; /* Since btr_cur_update_alloc_zip could - reorganize the page, we need to repositon + reorganize the page, we need to reposition cursor2. */ if (cursor2) { cursor2->page_cur.rec = @@ -1888,7 +1888,7 @@ Calculates MBR_AREA(a+b) - MBR_AREA(a) Note: when 'a' and 'b' objects are far from each other, the area increase can be really big, so this function can return 'inf' as a result. -Return the area increaed. */ +Return the area increased. */ static double rtree_area_increase( const uchar* a, /*!< in: original mbr. */ diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 05d7b46866d..ba17f304330 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -3243,7 +3243,7 @@ static bool innobase_query_caching_table_check_low( retrieval or storing into: (1) There should not be any locks on the table. - (2) Someother trx shouldn't invalidate the cache before this + (2) Some other trx shouldn't invalidate the cache before this transaction started. (3) Read view shouldn't exist. 
If exists then the view low_limit_id should be greater than or equal to the transaction that @@ -6819,7 +6819,7 @@ wsrep_store_key_val_for_row( if (true_len > key_len) { true_len = key_len; } - /* cannot exceed max column lenght either, we may need to truncate + /* cannot exceed max column length either, we may need to truncate the stored value: */ if (true_len > sizeof(sorted)) { true_len = sizeof(sorted); @@ -8012,9 +8012,9 @@ func_exit: } /** Fill the update vector's "old_vrow" field for those non-updated, -but indexed columns. Such columns could stil present in the virtual +but indexed columns. Such columns could still be present in the virtual index rec fields even if they are not updated (some other fields updated), -so needs to be logged. +so they need to be logged. @param[in] prebuilt InnoDB prebuilt struct @param[in,out] vfield field to filled @param[in] o_len actual column length @@ -10361,7 +10361,7 @@ ha_innobase::wsrep_append_keys( if (is_null0 != is_null1 || len0 != len1 || memcmp(key0, key1, len0)) { - /* This key has chaged. If it + /* This key has changed. If it is unique, this is an exclusive operation -> upgrade key type */ if (key_info->flags & HA_NOSAME) { @@ -11065,7 +11065,7 @@ create_index( prefix, key_part->field is not the table's column (it's a "fake" field forged in open_table_from_share() with length equal to the length of the prefix); so we have to go to - form->fied. */ + form->field. */ Field* field= form->field[key_part->field->field_index]; if (field == NULL) ut_error; diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index 3e961f011bd..c58906b746a 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -514,10 +514,10 @@ protected: /** the size of upd_buf in bytes */ ulint m_upd_buf_size; - /** Flags that specificy the handler instance (table) capability. */ + /** Flags that specify the handler instance (table) capability. 
*/ Table_flags m_int_table_flags; - /** Index into the server's primkary keye meta-data table->key_info{} */ + /** Index into the server's primary key meta-data table->key_info{} */ uint m_primary_key; /** this is set to 1 when we are starting a table scan but have @@ -532,7 +532,7 @@ protected: bool m_mysql_has_locked; /** If true, disable the Rowid Filter. It is disabled when - the enigne is intialized for making rnd_pos() calls */ + the engine is initialized for making rnd_pos() calls */ bool m_disable_rowid_filter; }; diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index dd07d891bce..ba139fc22ee 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -948,7 +948,7 @@ my_error_innodb(dberr_t error, const char *table, ulint flags) } /** Get the name of an erroneous key. -@param[in] error_key_num InnoDB number of the erroneus key +@param[in] error_key_num InnoDB number of the erroneous key @param[in] ha_alter_info changes that were being performed @param[in] table InnoDB table @return the name of the erroneous key */ @@ -1539,7 +1539,7 @@ static bool alter_options_need_rebuild( /* Specifying ROW_FORMAT or KEY_BLOCK_SIZE requires rebuilding the table. (These attributes in the .frm file may disagree with the InnoDB data dictionary, and - the interpretation of thse attributes depends on + the interpretation of these attributes depends on InnoDB parameters. That is why we for now always require a rebuild when these attributes are specified.) */ return true; @@ -3150,7 +3150,7 @@ innobase_col_check_fk( } /** Check whether the foreign key constraint is on base of any stored columns. -@param[in] foreign Foriegn key constraing information +@param[in] foreign Foreign key constraint information @param[in] table table to which the foreign key objects to be added @param[in] s_cols list of stored column information in the table. 
@@ -8747,7 +8747,7 @@ found_col: DBUG_RETURN(true); } -/* Check whether a columnn length change alter operation requires +/* Check whether a column length change alter operation requires to rebuild the template. @param[in] altered_table TABLE object for new version of table. @param[in] ha_alter_info Structure describing changes to be done @@ -10128,7 +10128,7 @@ innobase_update_foreign_cache( } else { /* Drop the foreign key constraints if the table was not rebuilt. If the table is rebuilt, - there would not be any foreign key contraints for + there would not be any foreign key constraints for it yet in the data dictionary cache. */ for (ulint i = 0; i < ctx->num_to_drop_fk; i++) { dict_foreign_t* fk = ctx->drop_fk[i]; diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index 71e98bfa224..68e230c043e 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -986,7 +986,7 @@ i_s_cmp_fill_low( mutex. Thus, some operation in page0zip.cc could increment a counter between the time we read it and clear it. We could introduce mutex protection, but it - could cause a measureable performance hit in + could cause a measurable performance hit in page0zip.cc. 
*/ table->field[1]->store(zip_stat->compressed, true); table->field[2]->store(zip_stat->compressed_ok, true); @@ -4304,7 +4304,7 @@ static int i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item *) DBUG_RETURN(0); } - /* Aquire the mutex before allocating info_buffer, since + /* Acquire the mutex before allocating info_buffer, since UT_LIST_GET_LEN(buf_pool.LRU) could change */ mysql_mutex_lock(&buf_pool.mutex); diff --git a/storage/innobase/include/data0data.h b/storage/innobase/include/data0data.h index f95e94a4f39..0e21c3220e0 100644 --- a/storage/innobase/include/data0data.h +++ b/storage/innobase/include/data0data.h @@ -277,7 +277,7 @@ dtuple_set_n_fields( dtuple_t* tuple, /*!< in: tuple */ ulint n_fields) /*!< in: number of fields */ MY_ATTRIBUTE((nonnull)); -/** Copies a data tuple's virtaul fields to another. This is a shallow copy; +/** Copies a data tuple's virtual fields to another. This is a shallow copy; @param[in,out] d_tuple destination tuple @param[in] s_tuple source tuple */ UNIV_INLINE diff --git a/storage/innobase/include/db0err.h b/storage/innobase/include/db0err.h index 921cc977e2e..642a3d2dbe0 100644 --- a/storage/innobase/include/db0err.h +++ b/storage/innobase/include/db0err.h @@ -89,7 +89,7 @@ enum dberr_t { only happen when there are too many concurrent transactions */ DB_UNSUPPORTED, /*!< when InnoDB sees any artefact or - a feature that it can't recoginize or + a feature that it can't recognize or work with e.g., FT indexes created by a later version of the engine. */ @@ -135,7 +135,7 @@ enum dberr_t { decrypt operation failed because of missing key management plugin, or missing or incorrect key or - incorret AES method or algorithm. */ + incorrect AES method or algorithm. 
*/ DB_IO_ERROR = 100, /*!< Generic IO error */ diff --git a/storage/innobase/include/dict0boot.h b/storage/innobase/include/dict0boot.h index 68400d2095d..75304fa5905 100644 --- a/storage/innobase/include/dict0boot.h +++ b/storage/innobase/include/dict0boot.h @@ -257,7 +257,7 @@ enum dict_fld_sys_virtual_enum { }; /* A number of the columns above occur in multiple tables. These are the -length of thos fields. */ +length of those fields. */ #define DICT_FLD_LEN_SPACE 4 #define DICT_FLD_LEN_FLAGS 4 diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h index 4a7fb63f318..c1f92950d06 100644 --- a/storage/innobase/include/dict0dict.h +++ b/storage/innobase/include/dict0dict.h @@ -1383,7 +1383,7 @@ public: inline void add(dict_table_t *table) noexcept; /** Remove a table definition from the data dictionary cache. @param[in,out] table cached table definition to be evicted - @param[in] lru whether this is part of least-recently-used evictiono + @param[in] lru whether this is part of least-recently-used eviction @param[in] keep whether to keep (not free) the object */ void remove(dict_table_t *table, bool lru= false, bool keep= false) noexcept; diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h index f8b619f30ff..cf8cb2c241b 100644 --- a/storage/innobase/include/dict0mem.h +++ b/storage/innobase/include/dict0mem.h @@ -285,8 +285,8 @@ index tables) of a FTS table are in HEX format. */ (table->flags2 &= ~(flag) & ((1U << DICT_TF2_BITS) - 1)) /** Tables could be chained together with Foreign key constraint. When -first load the parent table, we would load all of its descedents. -This could result in rescursive calls and out of stack error eventually. +first load the parent table, we would load all of its descendants. +This could result in recursive calls and out of stack error eventually. 
DICT_FK_MAX_RECURSIVE_LOAD defines the maximum number of recursive loads, when exceeded, the child table will not be loaded. It will be loaded when the foreign constraint check needs to be run. */ diff --git a/storage/innobase/include/dict0types.h b/storage/innobase/include/dict0types.h index 5c6f772b4ad..12daa2e82c6 100644 --- a/storage/innobase/include/dict0types.h +++ b/storage/innobase/include/dict0types.h @@ -159,7 +159,7 @@ struct table_name_t Note: the spatial status is part of persistent undo log, so we should not modify the values in MySQL 5.7 */ enum spatial_status_t { - /* Unkown status (undo format in 5.7.9) */ + /* Unknown status (undo format in 5.7.9) */ SPATIAL_UNKNOWN = 0, /** Not used in gis index. */ diff --git a/storage/innobase/include/fil0crypt.h b/storage/innobase/include/fil0crypt.h index d83923e2326..007f03d1ef2 100644 --- a/storage/innobase/include/fil0crypt.h +++ b/storage/innobase/include/fil0crypt.h @@ -46,7 +46,7 @@ void fil_crypt_threads_signal(bool broadcast= false); /** * CRYPT_SCHEME_UNENCRYPTED * - * Used as intermediate state when convering a space from unencrypted + * Used as intermediate state when converting a space from unencrypted * to encrypted */ /** @@ -74,7 +74,7 @@ struct key_struct extern ulong srv_encrypt_tables; /** Mutex helper for crypt_data->scheme -@param[in, out] schme encryption scheme +@param[in, out] scheme encryption scheme @param[in] exit should we exit or enter mutex ? 
*/ void crypt_data_scheme_locker( diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index cd1aa8ca336..d9b92764eac 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -389,7 +389,7 @@ private: /** Whether the tablespace is being imported */ bool being_imported= false; - /** Whether any corrupton of this tablespace has been reported */ + /** Whether any corruption of this tablespace has been reported */ mutable std::atomic_flag is_corrupted{false}; public: diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h index 07398fc805f..b8aed99571a 100644 --- a/storage/innobase/include/fsp0fsp.h +++ b/storage/innobase/include/fsp0fsp.h @@ -698,7 +698,7 @@ inline uint32_t fsp_flags_is_incompatible_mysql(uint32_t flags) { /* MySQL-8.0 SDI flag (bit 14), - or MySQL 5.7 Encyption flag (bit 13) + or MySQL 5.7 Encryption flag (bit 13) */ return flags >> 13 & 3; } diff --git a/storage/innobase/include/fts0fts.h b/storage/innobase/include/fts0fts.h index 8e7e6da959d..80b0ab1fd1a 100644 --- a/storage/innobase/include/fts0fts.h +++ b/storage/innobase/include/fts0fts.h @@ -88,7 +88,7 @@ those defined in mysql file ft_global.h */ #define FTS_INDEX_TABLE_IND_NAME "FTS_INDEX_TABLE_IND" -/** The number of FTS index partitions for a fulltext idnex */ +/** The number of FTS index partitions for a fulltext index */ #define FTS_NUM_AUX_INDEX 6 /** Threshold where our optimize thread automatically kicks in */ diff --git a/storage/innobase/include/fts0priv.h b/storage/innobase/include/fts0priv.h index 04faceb995e..1bb1b2a27e5 100644 --- a/storage/innobase/include/fts0priv.h +++ b/storage/innobase/include/fts0priv.h @@ -50,7 +50,7 @@ enum fts_table_state_enum { typedef enum fts_table_state_enum fts_table_state_t; -/** The default time to wait for the background thread (in microsecnds). */ +/** The default time to wait for the background thread (in microseconds). 
*/ #define FTS_MAX_BACKGROUND_THREAD_WAIT 10000 /** Maximum number of iterations to wait before we complain */ diff --git a/storage/innobase/include/fts0types.h b/storage/innobase/include/fts0types.h index 694ef75b011..8db88bfc176 100644 --- a/storage/innobase/include/fts0types.h +++ b/storage/innobase/include/fts0types.h @@ -79,7 +79,7 @@ struct fts_index_cache_t { CHARSET_INFO* charset; /*!< charset */ }; -/** Stop word control infotmation. */ +/** Stop word control information. */ struct fts_stopword_t { ulint status; /*!< Status of the stopword tree */ ib_alloc_t* heap; /*!< The memory allocator to use */ diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index 9c192f2a586..6fdf2f180bf 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -734,7 +734,7 @@ public: private: bool m_initialised; - /** mutex proteting the locks */ + /** mutex protecting the locks */ alignas(CPU_LEVEL1_DCACHE_LINESIZE) IF_DBUG(srw_lock_debug,srw_spin_lock) latch; #ifdef SUX_LOCK_GENERIC diff --git a/storage/innobase/include/lock0priv.h b/storage/innobase/include/lock0priv.h index 14f0a6e0903..a8869f6db72 100644 --- a/storage/innobase/include/lock0priv.h +++ b/storage/innobase/include/lock0priv.h @@ -259,7 +259,7 @@ updated but the lock prevents insert of a user record to the end of the page. Next key locks will prevent the phantom problem where new rows could appear to SELECT result sets after the select operation has been -performed. Prevention of phantoms ensures the serilizability of +performed. Prevention of phantoms ensures the serializability of transactions. What should we check if an insert of a new record is wanted? 
Only the lock on the next record on the same page, because also the diff --git a/storage/innobase/include/lock0types.h b/storage/innobase/include/lock0types.h index da235fb06a0..022c4191b16 100644 --- a/storage/innobase/include/lock0types.h +++ b/storage/innobase/include/lock0types.h @@ -257,7 +257,7 @@ struct ib_lock_t bool can_be_bypassed(bool has_s_lock_or_stronger) const noexcept { ut_ad(!is_table()); - /* We don't neet do check supremum bit in the lock's bitmap here, + /* We don't need to check supremum bit in the lock's bitmap here, because the function is always called after checking for bypass_mode, which already contains check for supremum. */ ut_ad(!is_insert_intention() || is_gap()); diff --git a/storage/innobase/include/mtr0types.h b/storage/innobase/include/mtr0types.h index 19db13a12b6..8a4f0fcc03e 100644 --- a/storage/innobase/include/mtr0types.h +++ b/storage/innobase/include/mtr0types.h @@ -191,7 +191,7 @@ The end of the mini-transaction would be indicated by the end byte 0x00 or 0x01; @see log_sys.get_sequence_bit(). If log_sys.is_encrypted(), that is followed by 8 bytes of nonce (part of initialization vector). That will be followed by 4 bytes -of CRC-32C of the entire mini-tranasction, excluding the end byte. */ +of CRC-32C of the entire mini-transaction, excluding the end byte. */ /** Redo log record types. 
These bit patterns (3 bits) will be written to the redo log file, so the existing codes or their interpretation on diff --git a/storage/innobase/include/row0ftsort.h b/storage/innobase/include/row0ftsort.h index 3ffa8243306..3d763026b31 100644 --- a/storage/innobase/include/row0ftsort.h +++ b/storage/innobase/include/row0ftsort.h @@ -35,7 +35,7 @@ Created 10/13/2010 Jimmy Yang #include "btr0bulk.h" #include "srv0srv.h" -/** This structure defineds information the scan thread will fetch +/** This structure defines information the scan thread will fetch and put to the linked list for parallel tokenization/sort threads to process */ typedef struct fts_doc_item fts_doc_item_t; diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h index 63858f25f02..2f50aa1560e 100644 --- a/storage/innobase/include/row0mysql.h +++ b/storage/innobase/include/row0mysql.h @@ -649,7 +649,7 @@ struct row_prebuilt_t { version is built in consistent read */ bool in_fts_query; /*!< Whether we are in a FTS query */ bool fts_doc_id_in_read_set; /*!< true if table has externally - defined FTS_DOC_ID coulmn. */ + defined FTS_DOC_ID column. */ /*----------------------*/ ulonglong autoinc_last_value; /*!< last value of AUTO-INC interval */ diff --git a/storage/innobase/include/row0quiesce.h b/storage/innobase/include/row0quiesce.h index b05b7666b0b..ecd6185cac7 100644 --- a/storage/innobase/include/row0quiesce.h +++ b/storage/innobase/include/row0quiesce.h @@ -46,7 +46,7 @@ row_quiesce_table_start( /*********************************************************************//** Set a table's quiesce state. -@return DB_SUCCESS or errro code. */ +@return DB_SUCCESS or error code. 
*/ dberr_t row_quiesce_set_state( /*==================*/ diff --git a/storage/innobase/include/row0undo.h b/storage/innobase/include/row0undo.h index ae067a8af4f..667b5b782a9 100644 --- a/storage/innobase/include/row0undo.h +++ b/storage/innobase/include/row0undo.h @@ -71,7 +71,7 @@ is assigned to handle an undo log record in the chain of different versions of the record, and the other thread happens to get the x-latch to the clustered index record at the right time. If a query thread notices that the clustered index record it is looking -for is missing, or the roll ptr field in the record doed not point to the +for is missing, or the roll ptr field in the record does not point to the undo log record the thread was assigned to handle, then it gives up the undo task for that undo log record, and fetches the next. This situation can occur just in the case where the transaction modified the same record several times diff --git a/storage/innobase/include/srv0mon.h b/storage/innobase/include/srv0mon.h index 6b9a6f09681..502c69be9da 100644 --- a/storage/innobase/include/srv0mon.h +++ b/storage/innobase/include/srv0mon.h @@ -79,7 +79,7 @@ struct monitor_value_t { monitor_running_t mon_status; /* whether monitor still running */ }; -/** Follwoing defines are possible values for "monitor_type" field in +/** Following defines are possible values for "monitor_type" field in "struct monitor_info" */ enum monitor_type_t { MONITOR_NONE = 0, /*!< No monitoring */ @@ -156,7 +156,7 @@ enum monitor_id_t { MONITOR_OVLD_ROW_LOCK_WAIT, MONITOR_OVLD_LOCK_AVG_WAIT_TIME, - /* Buffer and I/O realted counters. */ + /* Buffer and I/O related counters. */ MONITOR_MODULE_BUFFER, MONITOR_OVLD_BUFFER_POOL_SIZE, MONITOR_OVLD_BUF_POOL_READS, @@ -432,11 +432,11 @@ counter option. 
*/ (monitor_set_tbl[unsigned(monitor) / NUM_BITS_ULINT] & \ (ulint(1) << (unsigned(monitor) % NUM_BITS_ULINT))) -/** The actual monitor counter array that records each monintor counter +/** The actual monitor counter array that records each monitor counter value */ extern monitor_value_t innodb_counter_value[NUM_MONITOR]; -/** Following are macro defines for basic montior counter manipulations. +/** Following are macro defines for basic monitor counter manipulations. Please note we do not provide any synchronization for these monitor operations due to performance consideration. Most counters can be placed under existing mutex protections in respective code @@ -679,7 +679,7 @@ is monotonically increasing, only max value needs to be updated */ } \ } -/** Some values such as log sequence number are montomically increasing +/** Some values such as log sequence number are monotonically increasing number, do not need to record max/min values */ #define MONITOR_SET_SIMPLE(monitor, value) \ MONITOR_CHECK_DEFINED(value); \ diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 43174b0a9dc..ec1433ce9c9 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -150,7 +150,7 @@ extern mysql_mutex_t srv_monitor_file_mutex; extern FILE* srv_monitor_file; /** Mutex for locking srv_misc_tmpfile */ extern mysql_mutex_t srv_misc_tmpfile_mutex; -/* Temporary file for miscellanous diagnostic output */ +/* Temporary file for miscellaneous diagnostic output */ extern FILE* srv_misc_tmpfile; /* Server parameters which are read from the initfile */ @@ -584,7 +584,7 @@ struct export_var_t{ my_bool innodb_buffer_pool_load_incomplete;/*!< Buf pool load incomplete */ ulint innodb_buffer_pool_pages_total; /*!< Buffer pool size */ ulint innodb_buffer_pool_bytes_data; /*!< File bytes used */ - ulint innodb_buffer_pool_pages_misc; /*!< Miscellanous pages */ + ulint innodb_buffer_pool_pages_misc; /*!< Miscellaneous pages */ 
#ifdef UNIV_DEBUG ulint innodb_buffer_pool_pages_latched; /*!< Latched pages */ #endif /* UNIV_DEBUG */ diff --git a/storage/innobase/include/trx0purge.h b/storage/innobase/include/trx0purge.h index 21ec23817d2..b6e0751015c 100644 --- a/storage/innobase/include/trx0purge.h +++ b/storage/innobase/include/trx0purge.h @@ -65,7 +65,7 @@ class purge_sys_t { public: typedef std::vector> container_type; - /** Number of bits reseved to shift trx_no in purge queue element */ + /** Number of bits reserved to shift trx_no in purge queue element */ static constexpr unsigned TRX_NO_SHIFT= 8; bool empty() const { return m_array.empty(); } diff --git a/storage/innobase/include/trx0sys.h b/storage/innobase/include/trx0sys.h index c1bbf143a51..2567a672055 100644 --- a/storage/innobase/include/trx0sys.h +++ b/storage/innobase/include/trx0sys.h @@ -1068,7 +1068,7 @@ public: /** Takes MVCC snapshot. - To reduce malloc probablility we reserve rw_trx_hash.size() + 32 elements + To reduce malloc probability we reserve rw_trx_hash.size() + 32 elements in ids. For details about get_rw_trx_hash_version() != get_max_trx_id() spin diff --git a/storage/innobase/include/ut0lst.h b/storage/innobase/include/ut0lst.h index 7b7ed7b8e80..cc06344207b 100644 --- a/storage/innobase/include/ut0lst.h +++ b/storage/innobase/include/ut0lst.h @@ -117,7 +117,7 @@ Initializes the base node of a two-way list. } /** Functor for accessing the embedded node within a list element. This is -required because some lists can have the node emebedded inside a nested +required because some lists can have the node embedded inside a nested struct/union. See lock0priv.h (table locks) for an example. It provides a specialised functor to grant access to the list node. 
*/ template diff --git a/storage/innobase/include/ut0pool.h b/storage/innobase/include/ut0pool.h index e5df50fa071..dbe290faf45 100644 --- a/storage/innobase/include/ut0pool.h +++ b/storage/innobase/include/ut0pool.h @@ -204,7 +204,7 @@ private: /** Upper limit of used space */ Element* m_last; - /** Priority queue ordered on the pointer addresse. */ + /** Priority queue ordered on the pointer addresses. */ pqueue_t m_pqueue; /** Lock strategy to use */ diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 1fc8b52e940..61111010cd6 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -843,7 +843,7 @@ lock_rec_has_to_wait( /* If the upper server layer has already decided on the commit order between the transaction requesting the lock and the transaction owning the lock, we do not - need to wait for gap locks. Such ordeering by the upper + need to wait for gap locks. Such ordering by the upper server layer happens in parallel replication, where the commit order is fixed to match the original order on the master. @@ -2747,7 +2747,7 @@ lock_rec_inherit_to_gap(hash_cell_t &heir_cell, const page_id_t heir, not create bogus gap locks for non-gap locks for READ UNCOMMITTED and READ COMMITTED isolation levels. LOCK_ORDINARY and LOCK_GAP require a gap before the record to be locked, that is why - setting lock on supremmum is necessary. */ + setting lock on supremum is necessary. */ ((!from_split || !lock->is_record_not_gap()) && lock->mode() != (lock_trx->duplicates ? LOCK_S : LOCK_X)))) { @@ -4172,7 +4172,7 @@ void lock_table_resurrect(dict_table_t *table, trx_t *trx, lock_mode mode) { /* This is executed at server startup while no connections - are alowed. Do not bother with lock elision. */ + are allowed. Do not bother with lock elision. 
*/ LockMutexGuard g{SRW_LOCK_CALL}; ut_ad(!lock_table_other_has_incompatible(trx, LOCK_WAIT, table, mode)); @@ -4714,7 +4714,7 @@ void lock_release_on_drop(trx_t *trx) /** Reset a lock bit and rebuild waiting queue. @param cell rec hash cell of in_lock -@param lock the lock with supemum bit set */ +@param lock the lock with supremum bit set */ static void lock_rec_unlock(hash_cell_t &cell, lock_t *lock, ulint heap_no) { ut_ad(lock_rec_get_nth_bit(lock, heap_no)); @@ -4899,7 +4899,7 @@ reiterate: lock_sys.rd_unlock(); trx->mutex_unlock(); mtr.start(); - /* The curr thread is asociated with trx, which was just + /* The curr thread is associated with trx, which was just moved to XA PREPARE state. Other threads may not modify the existing lock objects of trx; they may only create new ones in lock_rec_convert_impl_to_expl() or lock_rec_move(). */ @@ -5327,7 +5327,7 @@ static ulint lock_get_n_rec_locks() /*********************************************************************//** Prints info of locks for all transactions. -@return FALSE if not able to acquire lock_sys.latch (and dislay info) */ +@return FALSE if not able to acquire lock_sys.latch (and display info) */ ibool lock_print_info_summary( /*====================*/ @@ -7330,7 +7330,7 @@ and less modified rows. Bit 0 is used to prefer orig_trx in case of a tie. 
ut_ad(victim->state == TRX_STATE_ACTIVE); /* victim->lock.was_chosen_as_deadlock_victim must always be set before - releasing waiting locks and reseting trx->lock.wait_lock */ + releasing waiting locks and resetting trx->lock.wait_lock */ victim->lock.was_chosen_as_deadlock_victim= true; DEBUG_SYNC_C("deadlock_report_before_lock_releasing"); lock_cancel_waiting_and_release(victim->lock.wait_lock); diff --git a/storage/innobase/lock/lock0prdt.cc b/storage/innobase/lock/lock0prdt.cc index 12b2a990f8c..350f92098b9 100644 --- a/storage/innobase/lock/lock0prdt.cc +++ b/storage/innobase/lock/lock0prdt.cc @@ -257,7 +257,7 @@ lock_prdt_has_lock( lock); /* if the lock predicate operator is the same - as the one to look, and prdicate test is successful, + as the one to look, and predicate test is successful, then we find a lock */ if (cur_prdt->op == prdt->op && lock_prdt_consistent(cur_prdt, prdt, 0)) { diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index f141c69388f..41df4dc3dd7 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -300,7 +300,7 @@ page_corrupted: if (UNIV_UNLIKELY(block.page.id().page_no() < 3 || block.page.zip.ssize)) goto record_corrupted; - static_assert(INIT_ROW_FORMAT_REDUNDANT == 0, "compatiblity"); + static_assert(INIT_ROW_FORMAT_REDUNDANT == 0, "compatibility"); static_assert(INIT_ROW_FORMAT_DYNAMIC == 1, "compatibility"); if (UNIV_UNLIKELY(!rlen)) goto record_corrupted; @@ -680,7 +680,7 @@ static struct p.first->second.lsn= lsn; p.first->second.file_name= defer.file_name; } - /* Add the newly added defered space and change the file name */ + /* Add the newly added deferred space and change the file name */ recv_spaces_t::iterator it{recv_spaces.find(space)}; if (it != recv_spaces.end()) it->second.name = defer.file_name; diff --git a/storage/innobase/log/log0sync.cc b/storage/innobase/log/log0sync.cc index f6ca440efa8..5c01fdb4e77 100644 --- a/storage/innobase/log/log0sync.cc +++ 
b/storage/innobase/log/log0sync.cc @@ -19,7 +19,7 @@ this program; if not, write to the Free Software Foundation, Inc., The group commit synchronization used in log_write_up_to() works as follows -For simplicity, lets consider only write operation,synchronozation of +For simplicity, let's consider only the write operation; synchronization of flush operation works the same. Rules of the game @@ -42,17 +42,17 @@ Fixes a) but burns CPU unnecessary. c) Mutex / condition variable combo. -Condtion variable notifies (broadcast) all waiters, whenever +Condition variable notifies (broadcast) all waiters, whenever last written lsn is changed. -Has a disadvantage of many suprious wakeups, stress on OS scheduler, +Has a disadvantage of many spurious wakeups, stress on OS scheduler, and mutex contention. d) Something else. Make use of the waiter's lsn parameter, and only wakeup "right" waiting threads. -We chose d). Even if implementation is more complicated than alternatves +We chose d). Even if implementation is more complicated than alternatives due to the need to maintain list of waiters, it provides the best performance. See group_commit_lock implementation for details. diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index c38197cebd7..1e6e5d54cc4 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -3251,7 +3251,7 @@ more concurrent threads via thread_group setting. @param[in] n_reader_threads - max number of concurrently executing read callbacks -@param[in] n_writer_thread - max number of cuncurrently +@param[in] n_writer_thread - max number of concurrently executing write callbacks @return 0 for success, !=0 for error.
*/ diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc index 43475882943..2445392e0b8 100644 --- a/storage/innobase/page/page0page.cc +++ b/storage/innobase/page/page0page.cc @@ -2205,7 +2205,7 @@ wrong_page_type: int ret = cmp_rec_rec( rec, old_rec, offsets, old_offsets, index); - /* For spatial index, on nonleaf leavel, we + /* For spatial index, on nonleaf level, we allow recs to be equal. */ if (ret <= 0 && !(ret == 0 && index->is_spatial() && !page_is_leaf(page))) { diff --git a/storage/innobase/pars/pars0opt.cc b/storage/innobase/pars/pars0opt.cc index f229ae8df6c..3dc64073498 100644 --- a/storage/innobase/pars/pars0opt.cc +++ b/storage/innobase/pars/pars0opt.cc @@ -411,7 +411,7 @@ opt_calc_index_goodness( /*******************************************************************//** Calculates the number of matched fields based on an index goodness. -@return number of excatly or partially matched fields */ +@return number of exactly or partially matched fields */ UNIV_INLINE ulint opt_calc_n_fields_from_goodness( diff --git a/storage/innobase/read/read0read.cc b/storage/innobase/read/read0read.cc index 46d58326edf..e522980e839 100644 --- a/storage/innobase/read/read0read.cc +++ b/storage/innobase/read/read0read.cc @@ -121,7 +121,7 @@ created. Thus we can easily see if this record was changed by the creating transaction. Because we already have clustered record we can access roll_ptr. Using this roll_ptr we can fetch undo record. We can now check that undo_no of the undo record is less than undo_no of the -trancaction which created a view when cursor was created. We see this +transaction which created a view when cursor was created. We see this clustered record only in case when record undo_no is less than undo_no in the view. If this is not true we build based on undo_rec previous version of the record. 
This record is found because purge can't remove diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc index 80a93116cb4..94a077821e1 100644 --- a/storage/innobase/row/row0ftsort.cc +++ b/storage/innobase/row/row0ftsort.cc @@ -504,7 +504,7 @@ row_merge_fts_doc_tokenize( row_merge_fts_doc_tokenize_by_parser(doc, parser, t_ctx); - /* Just indictate we have parsed all the word */ + /* Just indicate that we have parsed all words */ t_ctx->processed_len += 1; } @@ -593,7 +593,7 @@ row_merge_fts_doc_tokenize( variable-length column is less than 128 bytes or the maximum length is less than 256 bytes. */ - /* One variable length column, word with its lenght less than + /* One variable length column, word with its length less than fts_max_token_size, add one extra size and one extra byte. Since the max length for FTS token now is larger than 255, @@ -1276,7 +1276,7 @@ row_fts_insert_tuple( ulint num_item; /* Getting a new word, flush the last position info - for the currnt word in fts_node */ + for the current word in fts_node */ if (ib_vector_size(positions) > 0) { fts_cache_node_add_positions( NULL, fts_node, *in_doc_id, positions); diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 809a9128838..214283b602e 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -69,7 +69,7 @@ struct row_stats_t { found in the index */ ulint m_n_purged; /*!< Number of records purged - optimisatically */ + optimistically */ ulint m_n_rows; /*!< Number of rows */ @@ -269,7 +269,7 @@ struct row_import { dict_col_t* m_cols; /*!< Column data */ byte** m_col_names; /*!< Column names, we store the - column naems separately becuase + column names separately because there is no field to store the value in dict_col_t */ @@ -385,7 +385,7 @@ public: /** Class that purges delete marked records from indexes, both secondary and cluster. It does a pessimistic delete. 
This should only be done if we -couldn't purge the delete marked reocrds during Phase I. */ +couldn't purge the delete marked records during Phase I. */ class IndexPurge { public: /** Constructor @@ -1007,7 +1007,7 @@ private: rec_t* rec, const rec_offs* offsets) UNIV_NOTHROW; - /** In the clustered index, adjist the BLOB pointers as needed. + /** In the clustered index, adjust the BLOB pointers as needed. Also update the BLOB reference, write the new space id. @param rec record to update @param offsets column offsets for the record @@ -1135,7 +1135,7 @@ row_import::get_n_rows( return(index->m_stats.m_n_rows); } -/** Get the number of rows for which purge failed uding the convert phase. +/** Get the number of rows for which purge failed during the convert phase. @param name index name @return number of rows for which purge failed. */ ulint @@ -4729,7 +4729,7 @@ row_import_for_mysql( trx_t* trx = prebuilt->trx; /* The caller assured that this is not read_only_mode and that no - temorary tablespace is being imported. */ + temporary tablespace is being imported. */ ut_ad(!srv_read_only_mode); ut_ad(!table->is_temporary()); diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index d2d3714c586..e32654ab9f7 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -2334,7 +2334,7 @@ row_ins_duplicate_error_in_clust( /* NOTE: For unique non-clustered indexes there may be any number of delete marked records with the same value for the non-clustered index key (remember multiversioning), and which differ only in - the row refererence part of the index record, containing the + the row reference part of the index record, containing the clustered index key fields. For such a secondary index record, to avoid race condition, we must FIRST do the insertion and after that check that the uniqueness condition is not breached! 
*/ diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 1c9b730189d..f837818b709 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -829,7 +829,7 @@ error: if (fixed_len) { #ifdef UNIV_DEBUG - /* len should be between size calcualted base on + /* len should be between size calculated based on mbmaxlen and mbminlen */ ut_ad(len <= fixed_len); ut_ad(!col->mbmaxlen || len >= col->mbminlen @@ -4337,8 +4337,8 @@ void row_merge_drop_temp_indexes() } -/** Create temporary merge files in the given paramater path, and if -UNIV_PFS_IO defined, register the file descriptor with Performance Schema. +/** Create temporary merge files in the given parameter path, and if +UNIV_PFS_IO is defined, register the file descriptor with Performance Schema. @param[in] path location for creating temporary merge files, or NULL @return File descriptor */ static pfs_os_file_t row_merge_file_create_mode(const char *path, int mode) diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 1d5a07d303c..73420261b1a 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -782,7 +782,7 @@ row_create_prebuilt( /* Maximum size of the buffer needed for conversion of INTs from little endian format to big endian format in an index. An index - can have maximum 16 columns (MAX_REF_PARTS) in it. Therfore + can have maximum 16 columns (MAX_REF_PARTS) in it. Therefore Max size for PK: 16 * 8 bytes (BIGINT's size) = 128 bytes Max size Secondary index: 16 * 8 bytes + PK = 256 bytes. */ #define MAX_SRCH_KEY_VAL_BUFFER 2* (8 * MAX_REF_PARTS) @@ -1821,7 +1821,7 @@ void thd_get_query_start_data(THD *thd, char *buf); This is used in UPDATE CASCADE/SET NULL of a system versioned referenced table. -node->historical_row: dtuple_t containing pointers of row changed by refertial +node->historical_row: dtuple_t containing pointers of row changed by referential action. 
@param[in] thr current query thread @@ -2132,7 +2132,7 @@ row_create_index_for_mysql( /* For temp-table we avoid insertion into SYSTEM TABLES to maintain performance and so we have separate path that directly - just updates dictonary cache. */ + just updates dictionary cache. */ if (!table->is_temporary()) { ut_ad(trx->state == TRX_STATE_ACTIVE); ut_ad(trx->dict_operation); diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc index 5e955694683..d91fe8a7a7f 100644 --- a/storage/innobase/row/row0purge.cc +++ b/storage/innobase/row/row0purge.cc @@ -1647,7 +1647,7 @@ row_purge_step( #ifdef UNIV_DEBUG /***********************************************************//** -Validate the persisent cursor. The purge node has two references +Validate the persistent cursor. The purge node has two references to the clustered index record - one via the ref member, and the other via the persistent cursor. These two references must match each other if the found_clust flag is set. diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 10df64b2abc..0be39009d90 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -4509,8 +4509,8 @@ early_not_found: } } - /* We don't support sequencial scan for Rtree index, because it - is no meaning to do so. */ + /* We don't support sequential scan for Rtree index because it + is pointless. 
*/ if (dict_index_is_spatial(index) && !RTREE_SEARCH_MODE(mode)) { trx->op_info = ""; DBUG_RETURN(DB_END_OF_INDEX); @@ -4731,7 +4731,7 @@ wait_table_again: if (UNIV_LIKELY(direction != 0)) { if (spatial_search) { /* R-Tree access does not need to do - cursor position and resposition */ + cursor position and reposition */ goto next_rec; } diff --git a/storage/innobase/row/row0vers.cc b/storage/innobase/row/row0vers.cc index c7990445a05..f29650cd7ca 100644 --- a/storage/innobase/row/row0vers.cc +++ b/storage/innobase/row/row0vers.cc @@ -402,8 +402,8 @@ row_vers_impl_x_locked( const rec_t* clust_rec; dict_index_t* clust_index; - /* The function must not be invoked under lock_sys latch to prevert - latching orded violation, i.e. page latch must be acquired before + /* The function must not be invoked under lock_sys latch to prevent + latching order violation, i.e. page latch must be acquired before lock_sys latch */ lock_sys.assert_unlocked(); /* The current function can be called from lock_rec_unlock_unmodified() diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc index 2a22403e125..28bde7ce271 100644 --- a/storage/innobase/srv/srv0mon.cc +++ b/storage/innobase/srv/srv0mon.cc @@ -1582,7 +1582,7 @@ srv_mon_process_existing_counter( & MONITOR_DISPLAY_CURRENT) { MONITOR_SET(monitor_id, value); } else { - /* Most status counters are montonically + /* Most status counters are monotonically increasing, no need to update their minimum values. 
Only do so if "update_min" set to TRUE */ diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 5c34b356790..dcc82f4c34b 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -346,7 +346,7 @@ mysql_mutex_t srv_monitor_file_mutex; FILE* srv_monitor_file; /** Mutex for locking srv_misc_tmpfile */ mysql_mutex_t srv_misc_tmpfile_mutex; -/** Temporary file for miscellanous diagnostic output */ +/** Temporary file for miscellaneous diagnostic output */ FILE* srv_misc_tmpfile; /* The following counts are used by the srv_master_callback. */ diff --git a/storage/innobase/trx/trx0roll.cc b/storage/innobase/trx/trx0roll.cc index 5c89bfb7c33..03900cb8a58 100644 --- a/storage/innobase/trx/trx0roll.cc +++ b/storage/innobase/trx/trx0roll.cc @@ -201,7 +201,7 @@ dberr_t trx_rollback_for_mysql(trx_t* trx) case TRX_STATE_NOT_STARTED: trx->will_lock = false; ut_ad(trx->mysql_thd); - /* Galera transaction abort can be invoked from MDL acquision + /* Galera transaction abort can be invoked from MDL acquisition code, so trx->lock.was_chosen_as_deadlock_victim can be set even if trx->state is TRX_STATE_NOT_STARTED. */ ut_ad(!(trx->lock.was_chosen_as_deadlock_victim & 1)); diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc index 21c0c10482a..4fa0dba9a14 100644 --- a/storage/innobase/trx/trx0undo.cc +++ b/storage/innobase/trx/trx0undo.cc @@ -59,7 +59,7 @@ we trigger the start of a purge? When a transaction writes to an undo log, it may notice that the space is running out. When a read view is closed, it may make some history superfluous. The server can have an utility which periodically checks if it can purge some history. 
- In a parallellized purge we have the problem that a query thread + In a parallelized purge we have the problem that a query thread can remove a delete marked clustered index record before another query thread has processed an earlier version of the record, which cannot then be done because the row cannot be constructed from the clustered index diff --git a/storage/innobase/ut/ut0rbt.cc b/storage/innobase/ut/ut0rbt.cc index 7ba6693cbc1..4894dd895a4 100644 --- a/storage/innobase/ut/ut0rbt.cc +++ b/storage/innobase/ut/ut0rbt.cc @@ -61,7 +61,7 @@ static ibool rbt_check_ordering( /*===============*/ - const ib_rbt_t* tree) /*!< in: tree to verfify */ + const ib_rbt_t* tree) /*!< in: tree to verify */ { const ib_rbt_node_t* node; const ib_rbt_node_t* prev = NULL; @@ -414,7 +414,7 @@ rbt_find_successor( /**********************************************************************//** Find the given node's precedecessor. -@return predecessor node or NULL if no predecesor */ +@return predecessor node or NULL if no predecessor */ static ib_rbt_node_t* rbt_find_predecessor( diff --git a/storage/maria/aria_pack.c b/storage/maria/aria_pack.c index 43150d6e02c..c17be37e03b 100644 --- a/storage/maria/aria_pack.c +++ b/storage/maria/aria_pack.c @@ -2905,7 +2905,7 @@ static char *make_old_name(char *new_name, char *old_name) return fn_format(new_name,old_name,"",OLD_EXT,2+4); } - /* rutines for bit writing buffer */ + /* routines for bit writing buffer */ static void init_file_buffer(File file, pbool read_buffer) { diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc index 860c09be8e3..b15416f6fc0 100644 --- a/storage/maria/ha_maria.cc +++ b/storage/maria/ha_maria.cc @@ -4324,7 +4324,7 @@ int ha_maria::find_unique_row(uchar *record, uint constrain_no) else { /* - It is case when just unique index used instead unicue constrain + It is the case when just a unique index is used instead of a unique constraint (conversion from heap table).
*/ DBUG_ASSERT(file->s->state.header.keys > constrain_no); diff --git a/storage/maria/ha_s3.cc b/storage/maria/ha_s3.cc index b8e6b2d0a10..9ae16524b9b 100644 --- a/storage/maria/ha_s3.cc +++ b/storage/maria/ha_s3.cc @@ -503,9 +503,9 @@ int ha_s3::rename_table(const char *from, const char *to) The table is renamed to a temporary table. This only happens in the case of an ALTER PARTITION failure and there will be soon a delete issued for the temporary table. The only thing we can do - is to remove the from table. We will get an extra errors for the - uppcoming but we will ignore this minor problem for now as this - is an unlikely event and the extra warnings are just annoying, + is to remove the "from" table. We will get extra errors for this + but we will ignore this minor problem for now as this + is an unlikely event and extra warnings are just annoying, not critical. */ error= aria_delete_from_s3(s3_client, from_s3_info.bucket.str, @@ -868,7 +868,7 @@ static int s3_discover_table_existence(handlerton *hton, const char *db, /** Return a list of all S3 tables in a database - Partitoned tables are not shown + Partitioned tables are not shown */ static int s3_discover_table_names(handlerton *hton __attribute__((unused)), @@ -929,7 +929,7 @@ int ha_s3::discover_check_version() s3_info.tabledef_version= table->s->tabledef_version; /* We have to change the database and table as the table may part of a - partitoned table. In this case we want to check the frm file for the + partitioned table. In this case we want to check the frm file for the partitioned table, not the part table. 
*/ s3_info.base_table= table->s->table_name; diff --git a/storage/maria/ma_backup.c b/storage/maria/ma_backup.c index 0384dfb4cc5..470d3fddc48 100644 --- a/storage/maria/ma_backup.c +++ b/storage/maria/ma_backup.c @@ -84,7 +84,7 @@ int aria_get_capabilities(File kfile, ARIA_TABLE_CAPABILITIES *cap) if (share.state.header.data_file_type == BLOCK_RECORD) { - /* Calulate how man pages the row bitmap covers. From _ma_bitmap_init() */ + /* Calculate how many pages the row bitmap covers. From _ma_bitmap_init() */ aligned_bit_blocks= (cap->block_size - PAGE_SUFFIX_SIZE) / 6; /* In each 6 bytes, we have 6*8/3 = 16 pages covered diff --git a/storage/maria/ma_bitmap.c b/storage/maria/ma_bitmap.c index e3f8cb3ed84..bdfba862b0c 100644 --- a/storage/maria/ma_bitmap.c +++ b/storage/maria/ma_bitmap.c @@ -65,7 +65,7 @@ 'min record length'. Tail pages are for overflow data which can be of any size and thus doesn't have to be adjusted for different tables. If we add more columns to the table, some of the originally calculated - 'cut off' points may not be optimal, but they shouldn't be 'drasticly + 'cut off' points may not be optimal, but they shouldn't be 'drastically wrong'. 
When allocating data from the bitmap, we are trying to do it in a @@ -1370,7 +1370,7 @@ static my_bool allocate_head(MARIA_FILE_BITMAP *bitmap, uint size, else { /* - This is not stricly needed as used_size should be alligned on 6, + This is not strictly needed as used_size should be aligned on 6, but for easier debugging lets try to keep it more accurate */ uint position= (uint) (best_data - bitmap->map) + 6; @@ -1758,7 +1758,7 @@ static my_bool find_tail(MARIA_HA *info, uint length, size_t position) /* We have to add DIR_ENTRY_SIZE to ensure we have space for the tail and - it's directroy entry on the page + its directory entry on the page */ while (allocate_tail(bitmap, length + DIR_ENTRY_SIZE, block)) if (move_to_next_bitmap(info, bitmap)) @@ -2208,7 +2208,7 @@ abort: This function is only called when the new row can't fit in the space of the old row in the head page. - This is essently same as _ma_bitmap_find_place() except that + This is essentially the same as _ma_bitmap_find_place() except that we don't call find_head() to search in bitmaps where to put the page. 
RETURN @@ -2637,7 +2637,7 @@ void _ma_bitmap_flushable(MARIA_HA *info, int non_flushable_inc) DBUG_ENTER("_ma_bitmap_flushable"); /* - Not transactional tables are never automaticly flushed and needs no + Not transactional tables are never automatically flushed and need no protection */ if (!share->now_transactional) diff --git a/storage/maria/ma_blockrec.c b/storage/maria/ma_blockrec.c index 005035f8e3f..84f196329ac 100644 --- a/storage/maria/ma_blockrec.c +++ b/storage/maria/ma_blockrec.c @@ -5054,7 +5054,7 @@ int _ma_read_block_record2(MARIA_HA *info, uchar *record, #ifdef EXTRA_DEBUG if (share->calc_checksum && !info->in_check_table) { - /* Esnure that row checksum is correct */ + /* Ensure that row checksum is correct */ DBUG_ASSERT(((share->calc_checksum)(info, record) & 255) == cur_row->checksum); } @@ -5715,7 +5715,7 @@ uint ma_calc_length_for_store_length(ulong nr) } -/* Retrive a stored number */ +/* Retrieve a stored number */ static ulong ma_get_length(const uchar **packet) { @@ -6244,7 +6244,7 @@ my_bool write_hook_for_undo_row_delete(enum translog_record_type type /** - @brief Upates "records" and "checksum" and calls the generic UNDO hook + @brief Updates "records" and "checksum" and calls the generic UNDO hook @return Operation status, always 0 (success) */ diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c index 8534d6d5b64..1a8de445264 100644 --- a/storage/maria/ma_check.c +++ b/storage/maria/ma_check.c @@ -143,7 +143,7 @@ void maria_chk_init_for_check(HA_CHECK *param, MARIA_HA *info) if (!info->s->base.born_transactional) { /* - There are no trids. Howver we want to set max_trid to make test of + There are no trids. However we want to set max_trid to make test of create_trid simpler. 
*/ param->max_trid= ~(TrID) 0; @@ -1667,7 +1667,7 @@ static int check_page_layout(HA_CHECK *param, MARIA_HA *info, } *free_slots_found= free_entries; - /* Check directry */ + /* Check directory */ dir_entry= page+ block_size - PAGE_SUFFIX_SIZE; first_dir_entry= (block_size - row_count * DIR_ENTRY_SIZE - PAGE_SUFFIX_SIZE); @@ -1739,7 +1739,7 @@ static int check_page_layout(HA_CHECK *param, MARIA_HA *info, This is for rows-in-block format. Before this, we have already called check_page_layout(), so - we know the block is logicaly correct (even if the rows may not be that) + we know the block is logically correct (even if the rows may not be that) RETURN 0 ok diff --git a/storage/maria/ma_commit.c b/storage/maria/ma_commit.c index 4bd64bfdee0..534b4f651f8 100644 --- a/storage/maria/ma_commit.c +++ b/storage/maria/ma_commit.c @@ -83,7 +83,7 @@ int ma_commit(TRN *trn) /** - Writes a COMMIT record for a transaciton associated with a file + Writes a COMMIT record for a transaction associated with a file @param info Maria handler diff --git a/storage/maria/ma_delete.c b/storage/maria/ma_delete.c index 77ffb47d93c..349da9f904f 100644 --- a/storage/maria/ma_delete.c +++ b/storage/maria/ma_delete.c @@ -754,7 +754,7 @@ err: @brief Balances adjacent pages if underflow occours @fn underflow() - @param anc_buff Anchestor page data + @param anc_buff Ancestor page data @param leaf_page Leaf page (page that underflowed) @param leaf_page_link Pointer to pin information about leaf page @param keypos Position after current key in anc_buff diff --git a/storage/maria/ma_dynrec.c b/storage/maria/ma_dynrec.c index 00fd36fa34a..2da29933182 100644 --- a/storage/maria/ma_dynrec.c +++ b/storage/maria/ma_dynrec.c @@ -903,7 +903,7 @@ static my_bool update_dynamic_record(MARIA_HA *info, MARIA_RECORD_POS filepos, Check if next block is a deleted block Above we have MARIA_MIN_BLOCK_LENGTH to avoid the problem where the next block is so small it can't be splited which could - casue problems + cause 
problems */ MARIA_BLOCK_INFO del_block; diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c index 0709f71ce18..f2789567fc6 100644 --- a/storage/maria/ma_extra.c +++ b/storage/maria/ma_extra.c @@ -225,7 +225,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function, info->read_record= share->read_record; info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS); break; - case HA_EXTRA_NO_USER_CHANGE: /* Database is somehow locked agains changes */ + case HA_EXTRA_NO_USER_CHANGE: /* Database is locked preventing changes */ info->lock_type= F_EXTRA_LCK; /* Simulate as locked */ break; case HA_EXTRA_WAIT_LOCK: diff --git a/storage/maria/ma_ft_stem.c b/storage/maria/ma_ft_stem.c index 9f3d285813e..ab4d594fc18 100644 --- a/storage/maria/ma_ft_stem.c +++ b/storage/maria/ma_ft_stem.c @@ -15,4 +15,4 @@ /* Written by Sergei A. Golubchik, who has a shared copyright to this code */ -/* mulitingual stem */ +/* multilingual stem */ diff --git a/storage/maria/ma_key_recover.c b/storage/maria/ma_key_recover.c index acec592b922..a199ac4ccc4 100644 --- a/storage/maria/ma_key_recover.c +++ b/storage/maria/ma_key_recover.c @@ -1190,7 +1190,7 @@ uint _ma_apply_redo_index(MARIA_HA *info, /* Clean old stuff up. Gives us better compression of we archive things - and makes things easer to debug + and makes things easier to debug */ if (page_length < org_page_length) bzero(buff + page_length, org_page_length-page_length); @@ -1251,7 +1251,7 @@ my_bool _ma_apply_undo_key_insert(MARIA_HA *info, LSN undo_lsn, is reached. For index with transid flag, the ref_length of the key is not correct. This should however be safe as long as this key is only used for - comparsion against other keys (not for packing or for read-next etc as + comparison against other keys (not for packing or for read-next etc as in this case we use data_length + ref_length, which is correct. 
key.keyinfo= share->keyinfo + keynr; diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c index 7bdc902f92c..b76f9020e2f 100644 --- a/storage/maria/ma_loghandler.c +++ b/storage/maria/ma_loghandler.c @@ -395,7 +395,7 @@ struct st_translog_descriptor DYNAMIC_ARRAY unfinished_files; /* - minimum number of still need file calculeted during last + minimum number of still needed files calculated during last translog_purge call */ uint32 min_need_file; @@ -1582,7 +1582,7 @@ static my_bool translog_buffer_init(struct st_translog_buffer *buffer, int num) /* @brief close transaction log file by descriptor - @param file pagegecache file descriptor reference + @param file pagecache file descriptor reference @return Operation status @retval 0 OK @@ -1939,7 +1939,7 @@ static void translog_finish_page(TRANSLOG_ADDRESS *horizon, DBUG_ASSERT(LSN_FILE_NO(*horizon) == LSN_FILE_NO(cursor->buffer->offset) || translog_status == TRANSLOG_UNINITED); if ((LSN_FILE_NO(*horizon) != LSN_FILE_NO(cursor->buffer->offset))) - DBUG_VOID_RETURN; // everything wrong do not write to awoid more problems + DBUG_VOID_RETURN; // everything wrong do not write to avoid more problems translog_check_cursor(cursor); if (cursor->protected) { @@ -6740,7 +6740,7 @@ translog_scanner_get_page(TRANSLOG_SCANNER_DATA *scanner) @param fixed_horizon true if it is OK do not read records which was written after scanning beginning @param scanner scanner which have to be inited - @param use_direct prefer using direct lings from page handler + @param use_direct prefer using direct links from page handler where it is possible. 
@note If direct link was used translog_destroy_scanner should be @@ -6844,7 +6844,7 @@ static my_bool translog_scanner_eol(TRANSLOG_SCANNER_DATA *scanner) /** - @brief Cheks End of the Page + @brief Checks End of the Page @param scanner Information about current chunk during scanning diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c index 6fb293bd705..a4d1beb4a0f 100644 --- a/storage/maria/ma_open.c +++ b/storage/maria/ma_open.c @@ -181,7 +181,7 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, maria_delay_key_write) share->delay_key_write=1; - if (!share->now_transactional) /* If not transctional table */ + if (!share->now_transactional) /* If not transactional table */ { /* Pagecache requires access to info->trn->rec_lsn */ _ma_set_tmp_trn_for_table(&info, &dummy_transaction_object); @@ -548,7 +548,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags, /* A transactional table is not usable on this system if: - share->state.create_trid > trnman_get_max_trid() - - Critical as trid as stored releative to create_trid. + - Critical as trid is stored relative to create_trid. 
- uuid is different STATE_NOT_MOVABLE is reset when a table is zerofilled diff --git a/storage/maria/ma_packrec.c b/storage/maria/ma_packrec.c index 026a2d59c1c..33287c6734e 100644 --- a/storage/maria/ma_packrec.c +++ b/storage/maria/ma_packrec.c @@ -1460,7 +1460,7 @@ uint _ma_pack_get_block_info(MARIA_HA *maria, MARIA_BIT_BUFF *bit_buff, } - /* rutines for bit buffer */ + /* routines for bit buffer */ /* Note buffer must be 6 uchar bigger than longest row */ static void init_bit_buffer(MARIA_BIT_BUFF *bit_buff, uchar *buffer, diff --git a/storage/maria/ma_page.c b/storage/maria/ma_page.c index a3ee3b5eb3e..ab10aada7c3 100644 --- a/storage/maria/ma_page.c +++ b/storage/maria/ma_page.c @@ -87,7 +87,7 @@ void page_cleanup(MARIA_SHARE *share, MARIA_PAGE *page) @param lock Lock type for page @param level Importance of page; Priority for page cache @param buff Buffer to use for page - @param return_buffer Set to 1 if we want to force useage of buff + @param return_buffer Set to 1 if we want to force usage of the "buff" @return @retval 0 ok diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c index 1b09fbe76f4..c00b3cb88fb 100644 --- a/storage/maria/ma_pagecache.c +++ b/storage/maria/ma_pagecache.c @@ -1067,7 +1067,7 @@ size_t resize_pagecache(PAGECACHE *pagecache, finish: wqueue_unlink_from_queue(wqueue, thread); - /* Signal for the next resize request to proceeed if any */ + /* Signal for the next resize request to proceed if any */ if (wqueue->last_thread) { DBUG_PRINT("signal", @@ -3180,7 +3180,7 @@ static void check_and_set_lsn(PAGECACHE *pagecache, @brief Unlock/unpin page and put LSN stamp if it need @param pagecache pointer to a page cache data structure - @pagam file handler for the file for the block of data to be read + @param file file handler for the block of data to be read @param pageno number of the block of data in the file @param lock lock change @param pin pin page @@ -3192,8 +3192,8 @@ static void check_and_set_lsn(PAGECACHE 
*pagecache, direct link giving and the page was changed @note - Pininig uses requests registration mechanism it works following way: - | beginnig | ending | + Pinning uses requests registration mechanism that works the following way: + | beginning | ending | | of func. | of func. | ----------------------------+-------------+---------------+ PAGECACHE_PIN_LEFT_PINNED | - | - | @@ -3294,7 +3294,7 @@ void pagecache_unlock(PAGECACHE *pagecache, SYNOPSIS pagecache_unpin() pagecache pointer to a page cache data structure - file handler for the file for the block of data to be read + file file handler for the block of data to be read pageno number of the block of data in the file lsn if it is not LSN_IMPOSSIBLE (0) and it is bigger then LSN on the page it will be written on @@ -3684,7 +3684,7 @@ static struct rw_pin_change lock_to_pin[2][8]= @brief Read a block of data from a cached file into a buffer; @param pagecache pointer to a page cache data structure - @param file handler for the file for the block of data to be read + @param file file handler for the block of data to be read @param pageno number of the block of data in the file @param level determines the weight of the data @param buff buffer to where the data must be placed @@ -3744,7 +3744,7 @@ restart: /* If we use big block than the big block is multiple of blocks and we - have enouch blocks in cache + have enough blocks in cache */ DBUG_ASSERT(!pagecache->big_block_read || (file->big_block_size != 0 && @@ -4168,7 +4168,7 @@ void pagecache_add_level_by_link(PAGECACHE_BLOCK_LINK *block, @brief Delete page from the buffer @param pagecache pointer to a page cache data structure - @param file handler for the file for the block of data to be read + @param file file handler for the block of data to be read @param pageno number of the block of data in the file @param lock lock change @param flush flush page if it is dirty diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c index 
8a6060562b8..061c653460d 100644 --- a/storage/maria/ma_recovery.c +++ b/storage/maria/ma_recovery.c @@ -757,7 +757,7 @@ static my_bool create_database_if_not_exists(const char *name) dirname_part(dirname, name, &length); if (!length) { - /* Skip files without directores */ + /* Skip files without directories */ DBUG_RETURN(0); } /* @@ -3930,7 +3930,7 @@ state is current and can be flushed. So we have a per-table sequence: Launch one or more threads to do the background rollback. Don't wait for them to complete their rollback (background rollback; for debugging, we can have an option which waits). Set a counter (total_of_rollback_threads) - to the number of threads to lauch. + to the number of threads to launch. Note that InnoDB's rollback-in-background works as long as InnoDB is the last engine to recover, otherwise MySQL will refuse new connections until diff --git a/storage/maria/ma_rfirst.c b/storage/maria/ma_rfirst.c index 44d19485a5f..a1e79ca9b7f 100644 --- a/storage/maria/ma_rfirst.c +++ b/storage/maria/ma_rfirst.c @@ -15,7 +15,7 @@ #include "maria_def.h" - /* Read first row through a specfic key */ + /* Read first row through a specific key */ int maria_rfirst(MARIA_HA *info, uchar *buf, int inx) { diff --git a/storage/maria/ma_rt_split.c b/storage/maria/ma_rt_split.c index 88f00c6b46b..e4b786fcc90 100644 --- a/storage/maria/ma_rt_split.c +++ b/storage/maria/ma_rt_split.c @@ -279,7 +279,7 @@ static int split_maria_rtree_node(SplitStruct *node, int n_entries, @param key_with_nod_flag pointer to key-nod_flag @param full_length length of (key + (nod_flag (if node) or rowid (if leaf))) - @param log_internal_copy encoded list of mempcy() operations done on + @param log_internal_copy encoded list of memcpy() operations done on split page, having their source in the page @param log_internal_copy_length length of above list, in bytes @param log_key_copy operation describing the key's copy, or NULL if the diff --git a/storage/maria/ma_search.c 
b/storage/maria/ma_search.c index 1dccdf9279e..b6b6fdd3395 100644 --- a/storage/maria/ma_search.c +++ b/storage/maria/ma_search.c @@ -1392,7 +1392,7 @@ uint _ma_get_binary_pack_key(MARIA_KEY *int_key, uint page_flag, uint nod_flag, } /** - skip key which is ptefix packed against previous key + skip key which is prefix packed against previous key @fn _ma_skip_binary_key() @param key Keyinfo and buffer that can be used diff --git a/storage/maria/ma_sort.c b/storage/maria/ma_sort.c index 97f22103e46..9363eb6dce9 100644 --- a/storage/maria/ma_sort.c +++ b/storage/maria/ma_sort.c @@ -942,7 +942,7 @@ cleanup: buffpek Where to read from sort_length max length to read RESULT - > 0 Ammount of bytes read + > 0 Amount of bytes read -1 Error */ diff --git a/storage/maria/ma_sp_key.c b/storage/maria/ma_sp_key.c index 47750d82b01..45d05c6f996 100644 --- a/storage/maria/ma_sp_key.c +++ b/storage/maria/ma_sp_key.c @@ -33,7 +33,7 @@ static int sp_mbr_from_wkb(uchar (*wkb), uint size, uint n_dims, double *mbr); /** - Create spactial key + Create spatial key */ MARIA_KEY *_ma_sp_make_key(MARIA_HA *info, MARIA_KEY *ret_key, uint keynr, diff --git a/storage/maria/ma_state.c b/storage/maria/ma_state.c index a0a8c51dfb2..16a01ed77bd 100644 --- a/storage/maria/ma_state.c +++ b/storage/maria/ma_state.c @@ -492,7 +492,7 @@ my_bool _ma_trnman_end_trans_hook(TRN *trn, my_bool commit, share->state_history. Create a new history item for this commit and add it first in the state_history list. This ensures that all history items are stored in the list in - decresing trid order. + decreasing trid order. 
*/ if (!(history= my_malloc(PSI_INSTRUMENT_ME, sizeof(*history), MYF(MY_WME)))) diff --git a/storage/maria/ma_update.c b/storage/maria/ma_update.c index d7624f3299b..c7172fc1e51 100644 --- a/storage/maria/ma_update.c +++ b/storage/maria/ma_update.c @@ -169,7 +169,7 @@ int maria_update(register MARIA_HA *info, const uchar *oldrec, } /* - We can't yet have HA_STATE_AKTIV here, as block_record dosn't support it + We can't yet have HA_STATE_AKTIV here, as block_record doesn't support it */ info->update= (HA_STATE_CHANGED | HA_STATE_ROW_CHANGED | key_changed); info->row_changes++; diff --git a/storage/maria/ma_write.c b/storage/maria/ma_write.c index 0d6e18726db..7994a40ecac 100644 --- a/storage/maria/ma_write.c +++ b/storage/maria/ma_write.c @@ -1320,7 +1320,7 @@ static int _ma_balance_page(MARIA_HA *info, MARIA_KEYDEF *keyinfo, if ((right ? right_length : left_length) + curr_keylength <= share->max_index_block_size) { - /* Enough space to hold all keys in the two buffers ; Balance bufferts */ + /* Enough space to hold all keys in the two buffers ; Balance buffers */ new_left_length= share->keypage_header+nod_flag+(keys/2)*curr_keylength; new_right_length=share->keypage_header+nod_flag+(((keys+1)/2)* curr_keylength); @@ -1498,7 +1498,7 @@ static int _ma_balance_page(MARIA_HA *info, MARIA_KEYDEF *keyinfo, extra_page.buff= extra_buff; /* - 5 is the minum number of keys we can have here. This comes from + 5 is the minimum number of keys we can have here. 
This comes from the fact that each full page can store at least 2 keys and in this case we have a 'split' key, ie 2+2+1 = 5 */ @@ -1561,7 +1561,7 @@ static int _ma_balance_page(MARIA_HA *info, MARIA_KEYDEF *keyinfo, { /* Page order according to key values: - orignal_page (curr_page = left_page), next_page (buff), extra_buff + original_page (curr_page = left_page), next_page (buff), extra_buff Move page positions so that we store data in extra_page where next_page was and next_page will be stored at the new position @@ -2052,7 +2052,7 @@ my_bool _ma_log_change(MARIA_PAGE *ma_page, const uchar *key_pos, uint length, @note Write log entry for page that has got a key added to the page under - one and only one of the following senarios: + one and only one of the following scenarios: - Page is shortened from end - Data is added to end of page - Data added at front of page diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h index f535a769ae3..7234df00e76 100644 --- a/storage/maria/maria_def.h +++ b/storage/maria/maria_def.h @@ -162,7 +162,7 @@ typedef struct st_maria_info } MARIA_INFO; struct st_maria_share; -struct st_maria_handler; /* For referense */ +struct st_maria_handler; /* For reference */ struct st_maria_keydef; struct st_maria_key /* Internal info about a key */ @@ -463,7 +463,7 @@ typedef struct st_maria_state_info my_off_t dellink; /* Link to next removed block */ pgcache_page_no_t first_bitmap_with_space; ulonglong auto_increment; - TrID create_trid; /* Minum trid for file */ + TrID create_trid; /* Minimum trid for file */ TrID last_change_trn; /* selfdescriptive */ ulong update_count; /* Updated for each write lock */ ulong status; @@ -754,7 +754,7 @@ typedef struct st_maria_share my_off_t (*recpos_to_keypos)(struct st_maria_share *share, my_off_t pos); my_bool (*row_is_visible)(MARIA_HA *); - /* Mapings to read/write the data file */ + /* Mappings to read/write the data file */ size_t (*file_read)(MARIA_HA *, uchar *, size_t, my_off_t, myf); 
size_t (*file_write)(MARIA_HA *, const uchar *, size_t, my_off_t, myf); /* query cache invalidator for merged tables */ @@ -992,7 +992,7 @@ struct st_maria_handler my_off_t last_search_keypage; /* Last keypage when searching */ /* - QQ: the folloing two xxx_length fields should be removed, + QQ: the following two xxx_length fields should be removed, as they are not compatible with parallel repair */ ulong packed_length, blob_length; /* Length of found, packed record */ @@ -1011,7 +1011,7 @@ struct st_maria_handler int lastinx; /* Last used index */ uint last_rkey_length; /* Last length in maria_rkey() */ uint *last_rtree_keypos; /* Last key positions for rtrees */ - uint bulk_insert_ref_length; /* Lenght of row ref during bi */ + uint bulk_insert_ref_length; /* Length of row ref during bi */ uint non_flushable_state; enum ha_rkey_function last_key_func; /* CONTAIN, OVERLAP, etc */ uint save_lastkey_data_length; diff --git a/storage/maria/s3_func.c b/storage/maria/s3_func.c index 587e9b19f21..c3de1b4a05f 100644 --- a/storage/maria/s3_func.c +++ b/storage/maria/s3_func.c @@ -882,7 +882,7 @@ int partition_copy_to_s3(ms3_st *s3_client, const char *aws_bucket, if ((error= s3_read_file_from_disk(filename, &alloc_block, &frm_length, 0))) { /* - In case of ADD PARTITION PARTITON the .frm file is already renamed. + In case of ADD PARTITION the .frm file is already renamed. Copy the renamed file if it exists. */ fn_format(filename, path, "", ".frm", MY_REPLACE_EXT); diff --git a/storage/maria/tablockman.c b/storage/maria/tablockman.c index 180487a888a..f5e9071e51c 100644 --- a/storage/maria/tablockman.c +++ b/storage/maria/tablockman.c @@ -363,7 +363,7 @@ tablockman_getlock(TABLOCKMAN *lm, TABLE_LOCK_OWNER *lo, We don't really need tmp->waiting_for, as tmp->waiting_for_loid is enough. waiting_for is just a local cache to avoid calling loid_to_tlo(). 
- But it's essensial that tmp->waiting_for pointer can ONLY + But it's essential that tmp->waiting_for pointer can ONLY be dereferenced if find_by_loid() above returns a non-null pointer, because a TABLE_LOCK_OWNER object that it points to may've been freed when we come here after a signal. diff --git a/storage/maria/unittest/ma_control_file-t.c b/storage/maria/unittest/ma_control_file-t.c index fdbe86de01b..c61b9c742f1 100644 --- a/storage/maria/unittest/ma_control_file-t.c +++ b/storage/maria/unittest/ma_control_file-t.c @@ -174,7 +174,7 @@ int main(int argc,char *argv[]) ok(0 == test_bad_magic_string(), "test of bad magic string"); ok(0 == test_bad_checksum(), "test of bad checksum"); ok(0 == test_bad_hchecksum(), "test of bad hchecksum"); - ok(0 == test_future_size(), "test of ability to handlr future versions"); + ok(0 == test_future_size(), "test of ability to handle future versions"); ok(0 == test_bad_blocksize(), "test of bad blocksize"); ok(0 == test_bad_size(), "test of too small/big file"); @@ -475,7 +475,7 @@ static int test_future_size(void) /* fix lengths */ int2store(buffer + CF_CREATE_TIME_SIZE_OFFSET, CF_CREATE_TIME_TOTAL_SIZE + 1); int2store(buffer + CF_CHANGEABLE_SIZE_OFFSET, CF_CHANGEABLE_TOTAL_SIZE + 1); - /* recalculete checksums */ + /* recalculate checksums */ sum= (uint32) my_checksum(0, buffer, CF_CREATE_TIME_TOTAL_SIZE - CF_CHECKSUM_SIZE + 1); int4store(buffer + CF_CREATE_TIME_TOTAL_SIZE - CF_CHECKSUM_SIZE + 1, sum); diff --git a/storage/maria/unittest/ma_pagecache_single.c b/storage/maria/unittest/ma_pagecache_single.c index 9009e59c245..f34bf5ac0a0 100644 --- a/storage/maria/unittest/ma_pagecache_single.c +++ b/storage/maria/unittest/ma_pagecache_single.c @@ -600,7 +600,7 @@ int simple_big_test() desc[i].length= 0; desc[i].content= '\0'; ok(1, "Simple big file write"); - /* check written pages sequentally read */ + /* check written pages sequentially read */ for (i= 0; i < PCACHE_SIZE/(TEST_PAGE_SIZE/2); i++) { int j; diff --git 
a/storage/maria/unittest/sequence_storage.c b/storage/maria/unittest/sequence_storage.c index 1e6b3fcb239..74474731f07 100644 --- a/storage/maria/unittest/sequence_storage.c +++ b/storage/maria/unittest/sequence_storage.c @@ -95,7 +95,7 @@ void seq_storage_rewind(SEQ_STORAGE *seq) @brief Writes a number to the sequence file. @param file Path to the file where to write the sequence - @pagem num Number to be written + @param num Number to be written @retval 0 OK @retval 1 Error diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index b145acc82bd..83b8045f16f 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -485,7 +485,7 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, - compare FULLTEXT keys; - compare SPATIAL keys; - compare FIELD_SKIP_ZERO which is converted to FIELD_NORMAL correctly - (should be corretly detected in table2myisam). + (should be correctly detected in table2myisam). */ int check_definition(MI_KEYDEF *t1_keyinfo, MI_COLUMNDEF *t1_recinfo, @@ -642,7 +642,7 @@ void mi_check_print_warning(HA_CHECK *param, const char *fmt,...) /** Report list of threads (and queries) accessing a table, thread_id of a - thread that detected corruption, ource file name and line number where + thread that detected corruption, source file name and line number where this corruption was detected, optional extra information (string). This function is intended to be used when table corruption is detected. 
@@ -1313,7 +1313,7 @@ int ha_myisam::repair(THD *thd, HA_CHECK ¶m, bool do_optimize) #endif /* The following is to catch errors when my_errno is no set properly - during repairt + during repair */ my_errno= 0; if (mi_test_if_sort_rep(file,file->state->records,tmp_key_map,0) && diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index 60dc57c84df..1d9f00a8c74 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -1750,7 +1750,7 @@ err: } -/* Uppate keyfile when doing repair */ +/* Update keyfile when doing repair */ static int writekeys(MI_SORT_PARAM *sort_param) { @@ -2976,7 +2976,7 @@ int mi_repair_parallel(HA_CHECK *param, register MI_INFO *info, if (sort_param[0].fix_datafile) { /* - Append some nuls to the end of a memory mapped file. Destroy the + Append some nulls to the end of a memory mapped file. Destroy the write cache. The master thread did already detach from the share by remove_io_thread() in sort.c:thr_find_all_keys(). */ diff --git a/storage/myisam/mi_extra.c b/storage/myisam/mi_extra.c index daaa5d2e259..be197c87d71 100644 --- a/storage/myisam/mi_extra.c +++ b/storage/myisam/mi_extra.c @@ -208,7 +208,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg) info->read_record= share->read_record; info->opt_flag&= ~(KEY_READ_USED | REMEMBER_OLD_POS); break; - case HA_EXTRA_NO_USER_CHANGE: /* Database is somehow locked against changes */ + case HA_EXTRA_NO_USER_CHANGE: /* Database is locked preventing changes */ info->lock_type= F_EXTRA_LCK; /* Simulate as locked */ break; case HA_EXTRA_WAIT_LOCK: diff --git a/storage/myisam/mi_packrec.c b/storage/myisam/mi_packrec.c index c473615cc39..eaa7e04eab9 100644 --- a/storage/myisam/mi_packrec.c +++ b/storage/myisam/mi_packrec.c @@ -1412,7 +1412,7 @@ uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff, /* - Rutines for bit buffer + Routines for bit buffer Note: buffer must be 6 byte bigger than longest row */ diff --git 
a/storage/myisam/mi_range.c b/storage/myisam/mi_range.c index e5342a1905c..2f904fb6e24 100644 --- a/storage/myisam/mi_range.c +++ b/storage/myisam/mi_range.c @@ -292,7 +292,7 @@ err: } - /* Get keynummer of current key and max number of keys in nod */ + /* Get keynumber of current key and max number of keys in nod */ static uint _mi_keynr(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page, uchar *keypos, uint *ret_max_key) diff --git a/storage/myisam/mi_search.c b/storage/myisam/mi_search.c index ca31bc5af8b..7f1d37d6b64 100644 --- a/storage/myisam/mi_search.c +++ b/storage/myisam/mi_search.c @@ -322,7 +322,7 @@ int _mi_prefix_search(MI_INFO *info, register MI_KEYDEF *keyinfo, uchar *page, get_key_pack_length(kseg_len,length_pack,kseg); key_len_skip=length_pack+kseg_len; key_len_left=(int) key_len- (int) key_len_skip; - /* If key_len is 0, then lenght_pack is 1, then key_len_left is -1. */ + /* If key_len is 0, then length_pack is 1, then key_len_left is -1. */ cmplen=(key_len_left>=0) ? 
kseg_len : key_len-length_pack; DBUG_PRINT("info",("key: '%.*s'",kseg_len,kseg)); diff --git a/storage/myisam/mi_test1.c b/storage/myisam/mi_test1.c index 5a614edb563..3648698eac1 100644 --- a/storage/myisam/mi_test1.c +++ b/storage/myisam/mi_test1.c @@ -380,7 +380,7 @@ static void create_key(uchar *key,uint rownr) if (rownr == 0) { key[0]=1; /* null key */ - key[1]=0; /* Fore easy print of key */ + key[1]=0; /* For easy print of key */ return; } *key++=0; diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c index 077507e897c..0afaa48fc3d 100644 --- a/storage/myisam/myisampack.c +++ b/storage/myisam/myisampack.c @@ -2818,7 +2818,7 @@ static char *make_old_name(char *new_name, char *old_name) return fn_format(new_name,old_name,"",OLD_EXT,2+4); } - /* rutines for bit writing buffer */ + /* routines for bit writing buffer */ static void init_file_buffer(File file, pbool read_buffer) { diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc index 4887550c0e8..28e311a0ef0 100644 --- a/storage/myisammrg/ha_myisammrg.cc +++ b/storage/myisammrg/ha_myisammrg.cc @@ -58,7 +58,7 @@ the table in MYRG_INFO::children_attached. If necessary, the compatibility of parent and children is checked. - This check is necessary when any of the objects are reopend. This is + This check is necessary when any of the objects are reopened. This is detected by comparing the current table def version against the remembered child def version. On parent open, the list members are initialized to an "impossible"/"undefined" version value. So the check @@ -1284,7 +1284,7 @@ int ha_myisammrg::info(uint flag) /* valgrind may be unhappy about it, because optimizer may access values between file->keys and table->key_parts, that will be uninitialized. - It's safe though, because even if opimizer will decide to use a key + It's safe though, because even if optimizer will decide to use a key with such a number, it'll be an error later anyway. 
*/ bzero((char*) table->key_info[0].rec_per_key, diff --git a/storage/myisammrg/ha_myisammrg.h b/storage/myisammrg/ha_myisammrg.h index 02f9fa402d5..db143b4a5f1 100644 --- a/storage/myisammrg/ha_myisammrg.h +++ b/storage/myisammrg/ha_myisammrg.h @@ -196,7 +196,7 @@ public: /* Make an optionally lower-cases filename_to_tablename-decoded identifier - in chirdren mem_root. + in children mem_root. */ LEX_STRING make_child_ident_filename_to_tablename(const char *src, bool casedn) diff --git a/storage/myisammrg/myrg_extra.c b/storage/myisammrg/myrg_extra.c index 2b3861b9f7f..1c4fca44fcd 100644 --- a/storage/myisammrg/myrg_extra.c +++ b/storage/myisammrg/myrg_extra.c @@ -16,7 +16,7 @@ /* Extra functions we want to do with a database - - All flags, exept record-cache-flags, are set in all used databases + - All flags, except record-cache-flags, are set in all used databases record-cache-flags are set in myrg_rrnd when we are changing database. */ diff --git a/storage/oqgraph/oqgraph_shim.h b/storage/oqgraph/oqgraph_shim.h index 93c4e5fe04e..68098e2d070 100644 --- a/storage/oqgraph/oqgraph_shim.h +++ b/storage/oqgraph/oqgraph_shim.h @@ -474,7 +474,7 @@ namespace oqgraph3 find_vertex(oqgraph3::vertex_id id, const oqgraph3::graph& g) { // Fix for https://bugs.launchpad.net/oqgraph/+bug/1196020 returning vertex even when not in graph - // Psuedocode for fix: + // Pseudocode for fix: // if count(*) from g->TABLE where source=id or target=id > 0 then return id else return null oqgraph3::cursor* found_cursor = new oqgraph3::cursor(const_cast(&g)); bool found = (found_cursor->seek_to(id, boost::none) && found_cursor->seek_to(boost::none, id)); diff --git a/storage/perfschema/ha_perfschema.h b/storage/perfschema/ha_perfschema.h index 10e74131f30..7eba2f2a2b7 100644 --- a/storage/perfschema/ha_perfschema.h +++ b/storage/perfschema/ha_perfschema.h @@ -222,7 +222,7 @@ private: - performing point in time recovery in 5.6 with old archived logs. 
This API detects when the code calling the performance schema storage - engine is a slave thread or whether the code calling isthe client thread + engine is a slave thread or whether the code calling is the client thread executing a BINLOG'.. statement. This API acts as a late filter for the above mentioned cases. diff --git a/storage/perfschema/pfs_global.h b/storage/perfschema/pfs_global.h index 53425cdd4d7..855fd501325 100644 --- a/storage/perfschema/pfs_global.h +++ b/storage/perfschema/pfs_global.h @@ -115,7 +115,7 @@ uint pfs_get_socket_address(char *host, /** Compute a random index value in an interval. @param ptr seed address - @param max_size maximun size of the interval + @param max_size maximum size of the interval @return a random value in [0, max_size-1] */ inline uint randomized_index(const void *ptr, uint max_size) diff --git a/storage/perfschema/pfs_program.h b/storage/perfschema/pfs_program.h index a5a6245c1cc..0f54f4ec983 100644 --- a/storage/perfschema/pfs_program.h +++ b/storage/perfschema/pfs_program.h @@ -71,7 +71,7 @@ struct PFS_ALIGNED PFS_program : public PFS_instr /** Stored program stat. */ PFS_sp_stat m_sp_stat; - /** Referesh setup object flags. */ + /** Refresh setup object flags. */ void refresh_setup_object_flags(PFS_thread* thread); /** Reset data for this record. 
*/ diff --git a/storage/perfschema/pfs_server.h b/storage/perfschema/pfs_server.h index af3b6e87b66..a6d0388abd0 100644 --- a/storage/perfschema/pfs_server.h +++ b/storage/perfschema/pfs_server.h @@ -180,7 +180,7 @@ struct PFS_global_param */ long m_file_handle_sizing; /** - Maxium number of instrumented socket instances + Maximum number of instrumented socket instances @sa socket_lost */ long m_socket_sizing; diff --git a/storage/perfschema/pfs_timer.h b/storage/perfschema/pfs_timer.h index e5393a4a8a4..c1b2dd7a3d1 100644 --- a/storage/perfschema/pfs_timer.h +++ b/storage/perfschema/pfs_timer.h @@ -48,7 +48,7 @@ struct time_normalizer */ static time_normalizer* get(enum_timer_name timer_name); - /** Timer value at server statup. */ + /** Timer value at server startup. */ ulonglong m_v0; /** Conversion factor from timer values to pico seconds. */ ulonglong m_factor; diff --git a/storage/perfschema/pfs_variable.cc b/storage/perfschema/pfs_variable.cc index 3a1e83d7668..692e38958f2 100644 --- a/storage/perfschema/pfs_variable.cc +++ b/storage/perfschema/pfs_variable.cc @@ -1265,7 +1265,7 @@ void sum_host_status(PFS_client *pfs_host, STATUS_VAR *status_totals) /* Get status totals for this account from active THDs and from totals aggregated - from disconnectd threads. + from disconnected threads. 
*/ void sum_account_status(PFS_client *pfs_account, STATUS_VAR *status_totals) { diff --git a/storage/perfschema/table_replication_connection_configuration.cc b/storage/perfschema/table_replication_connection_configuration.cc index 71c39b2e98d..9dbd35838fd 100644 --- a/storage/perfschema/table_replication_connection_configuration.cc +++ b/storage/perfschema/table_replication_connection_configuration.cc @@ -37,7 +37,7 @@ #include "rpl_rli.h" #include "rpl_mi.h" #include "sql_parse.h" -//#include "rpl_msr.h" /* Multisource replciation */ +//#include "rpl_msr.h" /* Multisource replication */ #ifdef HAVE_REPLICATION THR_LOCK table_replication_connection_configuration::m_table_lock; diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index a6d38a700d0..8fa232b5753 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -1547,7 +1547,7 @@ static MYSQL_SYSVAR_BOOL( nullptr, true); // When pin_l0_filter_and_index_blocks_in_cache is true, RocksDB will use the -// LRU cache, but will always keep the filter & idndex block's handle checked +// LRU cache, but will always keep the filter & index block's handle checked // out (=won't call ShardedLRUCache::Release), plus the parsed out objects // the LRU cache will never push flush them out, hence they're pinned. 
// @@ -3366,7 +3366,7 @@ private: rocksdb::Status get(rocksdb::ColumnFamilyHandle *const column_family, const rocksdb::Slice &key, rocksdb::PinnableSlice *const value) const override { - // clean PinnableSlice right begfore Get() for multiple gets per statement + // clean PinnableSlice right before Get() for multiple gets per statement // the resources after the last Get in a statement are cleared in // handler::reset call value->Reset(); @@ -7849,7 +7849,7 @@ int ha_rocksdb::create(const char *const name, TABLE *const table_arg, int err; /* - Construct dbname.tablename ourselves, because parititioning + Construct dbname.tablename ourselves, because partitioning passes strings like "./test/t14#P#p0" for individual partitions, while table_arg->s->table_name has none of that. */ @@ -9552,7 +9552,7 @@ const std::string ha_rocksdb::generate_cf_name( DBUG_ASSERT(per_part_match_found != nullptr); // When creating CF-s the caller needs to know if there was a custom CF name - // specified for a given paritition. + // specified for a given partition. *per_part_match_found = false; // Index comment is used to define the column family name specification(s). 
@@ -9928,7 +9928,7 @@ int ha_rocksdb::check_and_lock_sk(const uint key_id, } /** - Enumerate all keys to check their uniquess and also lock it + Enumerate all keys to check their uniqueness and also lock it @param[in] row_info hold all data for update row, such as old row data and new row data @@ -11010,7 +11010,7 @@ int ha_rocksdb::info(uint flag) { uint64_t memtableCount; uint64_t memtableSize; - // the stats below are calculated from skiplist wich is a probablistic + // the stats below are calculated from skiplist which is a probabilistic // data structure, so the results vary between test runs // it also can return 0 for quite a large tables which means that // cardinality for memtable only indxes will be reported as 0 @@ -12313,7 +12313,7 @@ void ha_rocksdb::get_auto_increment(ulonglong off, ulonglong inc, // Optimization for the standard case where we are always simply // incrementing from the last position - // Use CAS operation in a loop to make sure automically get the next auto + // Use CAS operation in a loop to atomically get the next auto // increment value while ensuring that we don't wrap around to a negative // number. // diff --git a/storage/rocksdb/properties_collector.h b/storage/rocksdb/properties_collector.h index a924227d5b1..020e89ace00 100644 --- a/storage/rocksdb/properties_collector.h +++ b/storage/rocksdb/properties_collector.h @@ -93,9 +93,9 @@ class Rdb_tbl_card_coll { * Cardinality statistics might be calculated using some sampling strategy. * This method adjusts gathered statistics according to the sampling * strategy used. Note that adjusted cardinality value is just an estimate - * and can return a value exeeding number of rows in a table, so the + * and can return a value exceeding number of rows in a table, so the * returned value should be capped by row count before using it by - * an optrimizer or displaying it to a clent. + * an optimizer or displaying it to a client. 
*/ void AdjustStats(Rdb_index_stats *stats); diff --git a/storage/rocksdb/rdb_cf_options.cc b/storage/rocksdb/rdb_cf_options.cc index 19cb104c668..0dea0bc7d73 100644 --- a/storage/rocksdb/rdb_cf_options.cc +++ b/storage/rocksdb/rdb_cf_options.cc @@ -73,7 +73,7 @@ void Rdb_cf_options::get(const std::string &cf_name, // Get defaults. rocksdb::GetColumnFamilyOptionsFromString(*opts, m_default_config, opts); - // Get a custom confguration if we have one. + // Get a custom configuration if we have one. Name_to_config_t::iterator it = m_name_map.find(cf_name); if (it != m_name_map.end()) { diff --git a/storage/rocksdb/rdb_converter.cc b/storage/rocksdb/rdb_converter.cc index 6f8aa306668..433227782bf 100644 --- a/storage/rocksdb/rdb_converter.cc +++ b/storage/rocksdb/rdb_converter.cc @@ -257,7 +257,7 @@ int Rdb_value_field_iterator::next() { m_field_dec = m_field_iter->m_field_enc; bool decode = m_field_iter->m_decode; bool maybe_null = m_field_dec->maybe_null(); - // This is_null value is bind to how stroage format store its value + // This is_null value is bound to how storage format stores its value m_is_null = maybe_null && ((m_null_bytes[m_field_dec->m_null_offset] & m_field_dec->m_null_mask) != 0); diff --git a/storage/rocksdb/rdb_converter.h b/storage/rocksdb/rdb_converter.h index 6ace89b3366..4fc71a3b24f 100644 --- a/storage/rocksdb/rdb_converter.h +++ b/storage/rocksdb/rdb_converter.h @@ -76,7 +76,7 @@ class Rdb_convert_to_record_value_decoder { /** Class to iterator fields in RocksDB value slice A template class instantiation represent a way to decode the data. - The reason to use template class instead of normal class is to elimate + The reason to use template class instead of normal class is to eliminate virtual method call.
*/ template diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index 895f51811a6..29aa68b56e6 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -1628,7 +1628,7 @@ int Rdb_key_def::unpack_record(TABLE *const table, uchar *const buf, Rdb_string_reader reader(packed_key); Rdb_string_reader unp_reader = Rdb_string_reader::read_or_empty(unpack_info); - // There is no checksuming data after unpack_info for primary keys, because + // There is no checksumming data after unpack_info for primary keys, because // the layout there is different. The checksum is verified in // ha_rocksdb::convert_record_from_storage_format instead. DBUG_ASSERT_IMP(!(m_index_type == INDEX_TYPE_SECONDARY), @@ -2202,7 +2202,7 @@ void Rdb_key_def::pack_legacy_variable_format( flag is set to N. For N=9, the following input values encode to the specified - outout (where 'X' indicates a byte of the original input): + output (where 'X' indicates a byte of the original input): - 0 bytes is encoded as 0 0 0 0 0 0 0 0 0 - 1 byte is encoded as X 0 0 0 0 0 0 0 1 - 2 bytes is encoded as X X 0 0 0 0 0 0 2 @@ -4161,7 +4161,7 @@ bool Rdb_ddl_manager::init(Rdb_dict_manager *const dict_arg, /* If validate_tables is greater than 0 run the validation. Only fail the - initialzation if the setting is 1. If the setting is 2 we continue. + initialization if the setting is 1. If the setting is 2 we continue. 
*/ if (validate_tables > 0) { std::string msg; diff --git a/storage/rocksdb/rdb_mutex_wrapper.h b/storage/rocksdb/rdb_mutex_wrapper.h index bcc92533e73..a91f7f6bc68 100644 --- a/storage/rocksdb/rdb_mutex_wrapper.h +++ b/storage/rocksdb/rdb_mutex_wrapper.h @@ -91,7 +91,7 @@ class Rdb_cond_var : public rocksdb::TransactionDBCondVar { virtual rocksdb::Status Wait( const std::shared_ptr mutex) override; - // Block current thread until condition variable is notifiesd by a call to + // Block current thread until condition variable is notified by a call to // Notify() or NotifyAll(), or if the timeout is reached. // If timeout is non-negative, operation should be failed after this many // microseconds. diff --git a/storage/rocksdb/rdb_utils.cc b/storage/rocksdb/rdb_utils.cc index ad6fdf5825f..9d14bc65387 100644 --- a/storage/rocksdb/rdb_utils.cc +++ b/storage/rocksdb/rdb_utils.cc @@ -115,7 +115,7 @@ const char *rdb_find_in_string(const char *str, const char *pattern, } } - // Return the character after the found pattern or the null terminateor + // Return the character after the found pattern or the null terminator // if the pattern wasn't found. return str; } @@ -229,7 +229,7 @@ static const std::array rdb_hexdigit = {{'0', '1', '2', '3', '4', '5', /* Convert data into a hex string with optional maximum length. - If the data is larger than the maximum length trancate it and append "..". + If the data is larger than the maximum length truncate it and append "..". 
*/ std::string rdb_hexdump(const char *data, const std::size_t data_len, const std::size_t maxsize) { diff --git a/storage/rocksdb/rdb_utils.h b/storage/rocksdb/rdb_utils.h index 0ef74b9fd06..b0388ec9048 100644 --- a/storage/rocksdb/rdb_utils.h +++ b/storage/rocksdb/rdb_utils.h @@ -48,7 +48,7 @@ namespace myrocks { #endif // interface /* - Introduce C-style pseudo-namespaces, a handy way to make code more readble + Introduce C-style pseudo-namespaces, a handy way to make code more readable when calling into a legacy API, which does not have any namespace defined. Since we cannot or don't want to change the API in any way, we can use this mechanism to define readability tokens that look like C++ namespaces, but are diff --git a/storage/rocksdb/ut0counter.h b/storage/rocksdb/ut0counter.h index 5267508faba..bb5bd8cbce7 100644 --- a/storage/rocksdb/ut0counter.h +++ b/storage/rocksdb/ut0counter.h @@ -82,7 +82,7 @@ struct thread_id_indexer_t : public generic_indexer_t { } }; -/** For counters wher N=1 */ +/** For counters where N=1 */ template struct single_indexer_t { /** Default constructor/destructor should are OK. 
*/ diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc index 06f744155eb..75f0410cb9f 100644 --- a/storage/spider/ha_spider.cc +++ b/storage/spider/ha_spider.cc @@ -4469,7 +4469,7 @@ void ha_spider::position( } if (pt_clone_last_searcher) { - /* sercher is cloned handler */ + /* searcher is cloned handler */ DBUG_PRINT("info",("spider cloned handler access")); pt_clone_last_searcher->position(record); memcpy(ref, pt_clone_last_searcher->ref, ref_length); diff --git a/storage/spider/spd_db_include.h b/storage/spider/spd_db_include.h index fa2f59b9af8..fd8d9c4e4aa 100644 --- a/storage/spider/spd_db_include.h +++ b/storage/spider/spd_db_include.h @@ -1773,7 +1773,7 @@ typedef struct st_spider_result_list longlong second_read; int set_split_read_count; int *casual_read; - /* 0:nomal 1:store 2:store end */ + /* 0:normal 1:store 2:store end */ volatile int quick_phase; bool keyread; diff --git a/storage/spider/spd_param.cc b/storage/spider/spd_param.cc index f3f2f319a21..e3944df7bac 100644 --- a/storage/spider/spd_param.cc +++ b/storage/spider/spd_param.cc @@ -589,7 +589,7 @@ SPIDER_THDVAR_OVERRIDE_VALUE_FUNC(int, reset_sql_alloc) static MYSQL_THDVAR_INT( multi_split_read, /* name */ PLUGIN_VAR_RQCMDARG, /* opt */ - "Sprit read mode for multi range", /* comment */ + "Split read mode for multi range", /* comment */ NULL, /* check */ spider_var_deprecated_int, /* update */ 100, /* def */ diff --git a/storage/spider/spd_ping_table.cc b/storage/spider/spd_ping_table.cc index 1f495e0ca0a..79d3a72da8e 100644 --- a/storage/spider/spd_ping_table.cc +++ b/storage/spider/spd_ping_table.cc @@ -575,7 +575,7 @@ SPIDER_TABLE_MON_LIST *spider_get_ping_table_tgt( tmp_share, name, name_length )) || (*error_num = spider_create_conn_keys(tmp_share)) || - /* Pinally, populate `table_mon_list' with newly created + /* Finally, populate `table_mon_list' with newly created `SPIDER_TABLE_MON's */ (*error_num = spider_get_ping_table_mon( thd, table_mon_list, name, name_length, 
link_idx, server_id, &mem_root,