mirror of https://github.com/MariaDB/server.git
Fix typos in C comments inside storage/
This commit is contained in:
parent 3b3c512feb
commit 22efc2c784
@@ -215,7 +215,7 @@ int write_header(azio_stream *s)
  int8store(ptr + AZ_CHECK_POS, (unsigned long long)s->check_point); /* Start of Data Block Index Block */
  int8store(ptr + AZ_AUTOINCREMENT_POS, (unsigned long long)s->auto_increment); /* Start of Data Block Index Block */
  int4store(ptr+ AZ_LONGEST_POS , s->longest_row); /* Longest row */
- int4store(ptr+ AZ_SHORTEST_POS, s->shortest_row); /* Shorest row */
+ int4store(ptr+ AZ_SHORTEST_POS, s->shortest_row); /* Shortest row */
  int4store(ptr+ AZ_FRM_POS,
            AZHEADER_SIZE + AZMETA_BUFFER_SIZE); /* FRM position */
  *(ptr + AZ_DIRTY_POS)= (unsigned char)s->dirty; /* Start of Data Block Index Block */

@@ -41,8 +41,8 @@

  We keep a file pointer open for each instance of ha_archive for each read
  but for writes we keep one open file handle just for that. We flush it
- only if we have a read occur. azip handles compressing lots of records
- at once much better then doing lots of little records between writes.
+ only if we have a read occur. azio handles compressing lots of records
+ at once much better than doing lots of little records between writes.
  It is possible to not lock on writes but this would then mean we couldn't
  handle bulk inserts as well (that is if someone was trying to read at
  the same time since we would want to flush).

@@ -60,7 +60,7 @@

  At some point a recovery method for such a drastic case needs to be divised.

- Locks are row level, and you will get a consistant read.
+ Locks are row level, and you will get a consistent read.

  For performance as far as table scans go it is quite fast. I don't have
  good numbers but locally it has out performed both Innodb and MyISAM. For

@@ -1010,7 +1010,7 @@ int ha_archive::write_row(const uchar *buf)
  temp_auto= table->next_number_field->val_int();

  /*
-   We don't support decremening auto_increment. They make the performance
+   We don't support decrementing auto_increment. They make the performance
    just cry.
  */
  if (temp_auto <= share->archive_write.auto_increment &&

@@ -52,7 +52,7 @@ public:
  /*
    Version for file format.
    1 - Initial Version (Never Released)
-   2 - Stream Compression, seperate blobs, no packing
+   2 - Stream Compression, separate blobs, no packing
    3 - One stream (row and blobs), with packing
  */
  #define ARCHIVE_VERSION 3

@@ -107,7 +107,7 @@ class DllExport ARRAY : public XOBJECT, public CSORT { // Array descblock
  /* This class is used when constructing the arrays of constants used */
  /* for indexing. Its only purpose is to provide a way to sort, reduce */
  /* and reorder the arrays of multicolumn indexes as one block. Indeed */
- /* sorting the arrays independantly would break the correspondance of */
+ /* sorting the arrays independently would break the correspondence of */
  /* column values. */
  /***********************************************************************/
  class MULAR : public CSORT, public BLOCK { // No need to be an XOBJECT

@@ -95,7 +95,7 @@ CSORT::CSORT(bool cns, int th, int mth)
  } // end of CSORT constructor

  /***********************************************************************/
- /* CSORT intialization. */
+ /* CSORT initialization. */
  /***********************************************************************/
  int CSORT::Qsort(PGLOBAL g, int nb)
  {

@@ -339,7 +339,7 @@ int GZFAM::ReadBuffer(PGLOBAL g)
  *p = '\0'; // Eliminate ending new-line character

  if (*(--p) == '\r')
- *p = '\0'; // Eliminate eventuel carriage return
+ *p = '\0'; // Eliminate eventual carriage return

  strcpy(Tdbp->GetLine(), To_Buf);
  IsRead = true;

@@ -352,7 +352,7 @@ int TXTFAM::StoreValues(PGLOBAL g, bool upd)
  /* record are not necessarily updated in sequential order. */
  /* Moving intermediate lines cannot be done while making them because */
  /* this can cause extra wrong records to be included in the new file. */
- /* What we do here is to reorder the updated records and do all the */
+ /* What we do here is reorder the updated records and do all the */
  /* updates ordered by record position. */
  /***********************************************************************/
  int TXTFAM::UpdateSortedRows(PGLOBAL g)

@@ -402,7 +402,7 @@ err:
  /***********************************************************************/
  /* DeleteSortedRows. When deleting using indexing, the issue is that */
  /* record are not necessarily deleted in sequential order. Moving */
- /* intermediate lines cannot be done while deleing them because */
+ /* intermediate lines cannot be done while deleting them because */
  /* this can cause extra wrong records to be included in the new file. */
  /* What we do here is to reorder the deleted record and delete from */
  /* the file from the ordered deleted records. */

@@ -547,7 +547,7 @@ bool FILTER::FindJoinFilter(POPJOIN opj, PFIL fprec, bool teq, bool tek,
  return (Opc < 0);

  // Keep only equi-joins and specific joins (Outer and Distinct)
- // Normally specific join operators comme first because they have
+ // Normally specific join operators come first because they have
  // been placed first by SortJoin.
  if (teq && Opc > OP_EQ)
  return FALSE;

@@ -747,7 +747,7 @@ bool FILTER::CheckHaving(PGLOBAL g, PSQL sqlp)
  return FALSE;
  default:
  if (CheckColumn(g, sqlp, xp, agg) < -1)
- return TRUE; // Unrecovable error
+ return TRUE; // Unrecoverable error

  break;
  } // endswitch Opc

@@ -845,7 +845,7 @@ static int yy_get_next_buffer()
  { /* Don't try to fill the buffer, so this is an EOF. */
  if ( yy_c_buf_p - yytext_ptr - YY_MORE_ADJ == 1 )
  {
- /* We matched a singled characater, the EOB, so
+ /* We matched a singled character, the EOB, so
  * treat this as a final EOF.
  */
  return EOB_ACT_END_OF_FILE;

@@ -4990,7 +4990,7 @@ int ha_connect::external_lock(THD *thd, int lock_type)
  } // endelse Xchk

  if (CloseTable(g)) {
- // This is an error while builing index
+ // This is an error while building index
  // Make it a warning to avoid crash
  push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, g->Message);
  rc= 0;

@@ -519,7 +519,7 @@ protected:
  char *GetDBfromName(const char *name);

  // Members
- static ulong num; // Tracable handler number
+ static ulong num; // Traceable handler number
  PCONNECT xp; // To user_connect associated class
  ulong hnum; // The number of this handler
  query_id_t valid_query_id; // The one when tdbp was allocated

@@ -1318,7 +1318,7 @@ BOOL WritePrivateProfileSection(LPCSTR section,
  * - note that this means if the buffer was to small to return even just
  * the first section name then a single '\0' will be returned.
  * - the return value is the number of characters written in the buffer,
- * except if the buffer was too smal in which case len-2 is returned
+ * except if the buffer was too small in which case len-2 is returned
  *
  * Win2000:
  * - if the buffer is 0, 1 or 2 characters long then it is filled with

@@ -281,7 +281,7 @@ static CATPARM *AllocCatInfo(PGLOBAL g, CATINFO fid, PCSZ db,
  cap->Status = (UWORD *)PlugSubAlloc(g, NULL, m * sizeof(UWORD));

  } catch (int n) {
- htrc("Exeption %d: %s\n", n, g->Message);
+ htrc("Exception %d: %s\n", n, g->Message);
  cap = NULL;
  } catch (const char *msg) {
  htrc(g->Message, msg);

@@ -17,7 +17,7 @@
  /**************************************************************************/
  enum FNRC {RC_LICENSE = 7, /* PLGConnect prompt for license key */
  RC_PASSWD = 6, /* PLGConnect prompt for User/Pwd */
- RC_SUCWINFO = 5, /* Succes With Info return code */
+ RC_SUCWINFO = 5, /* Success With Info return code */
  RC_SOCKET = 4, /* RC from PLGConnect to socket DLL */
  RC_PROMPT = 3, /* Intermediate prompt return */
  RC_CANCEL = 2, /* Command was cancelled by user */

@@ -25,7 +25,7 @@ enum FNRC {RC_LICENSE = 7, /* PLGConnect prompt for license key */
  RC_SUCCESS = 0, /* Successful function (must be 0) */
  RC_MEMORY = -1, /* Storage allocation error */
  RC_TRUNCATED = -2, /* Result has been truncated */
- RC_TIMEOUT = -3, /* Connection timeout occurred */
+ RC_TIMEOUT = -3, /* Connection timeout occurred */
  RC_TOOBIG = -4, /* Data is too big for connection */
  RC_KEY = -5, /* Null ptr to key in Connect */
  /* or bad key in other functions */

@@ -535,7 +535,7 @@ enum XFLD {FLD_NO = 0, /* Not a field definition item */
  FLD_KEY = 11, /* Field key property */
  FLD_DEFAULT = 12, /* Field default value */
  FLD_EXTRA = 13, /* Field extra info */
- FLD_PRIV = 14, /* Field priviledges */
+ FLD_PRIV = 14, /* Field privileges */
  FLD_DATEFMT = 15, /* Field date format */
  FLD_FORMAT = 16, /* Field format */
  FLD_CAT = 17, /* Table catalog */

@@ -496,17 +496,17 @@ bool PlugEvalLike(PGLOBAL g, LPCSTR strg, LPCSTR pat, bool ci)
  /* */
  /* The Like predicate is true if: */
  /* */
- /* 1- A subtring of M is a sequence of 0 or more contiguous <CR> of M */
+ /* 1- A substring of M is a sequence of 0 or more contiguous <CR> of M*/
  /* and each <CR> of M is part of exactly one substring. */
  /* */
- /* 2- If the i-th <subtring-specifyer> of P is an <arbitrary-char- */
- /* specifier>, the i-th subtring of M is any single <CR>. */
+ /* 2- If the i-th <substring-specifier> of P is an <arbitrary-char- */
+ /* specifier>, the i-th substring of M is any single <CR>. */
  /* */
- /* 3- If the i-th <subtring-specifyer> of P is an <arbitrary-string- */
- /* specifier>, then the i-th subtring of M is any sequence of zero */
+ /* 3- If the i-th <substring-specifier> of P is an <arbitrary-string- */
+ /* specifier>, then the i-th substring of M is any sequence of zero*/
  /* or more <CR>. */
  /* */
- /* 4- If the i-th <subtring-specifyer> of P is neither an <arbitrary- */
+ /* 4- If the i-th <substring-specifier> of P is neither an <arbitrary-*/
  /* character-specifier> nor an <arbitrary-string-specifier>, then */
  /* the i-th substring of M is equal to that <substring-specifier> */
  /* according to the collating sequence of the <like-predicate>, */

@@ -514,7 +514,7 @@ bool PlugEvalLike(PGLOBAL g, LPCSTR strg, LPCSTR pat, bool ci)
  /* length as that <substring-specifier>. */
  /* */
  /* 5- The number of substrings of M is equal to the number of */
- /* <subtring-specifiers> of P. */
+ /* <substring-specifiers> of P. */
  /* */
  /* Otherwise M like P is false. */
  /***********************************************************************/
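The five rules above read abstractly; as an illustration only (a hedged sketch, not part of this commit nor of CONNECT's actual PlugEvalLike/EvalLikePattern code, and ignoring escape characters and collations), a minimal recursive matcher in which '%' plays the <arbitrary-string-specifier> and '_' the <arbitrary-character-specifier> looks like this:

static int like_match(const char *s, const char *p)
{
  if (!*p)                             /* rule 5: pattern exhausted, */
    return !*s;                        /* string must be exhausted too */
  if (*p == '%')                       /* rule 3: zero or more characters */
    return like_match(s, p + 1) || (*s && like_match(s + 1, p));
  if (*s && (*p == '_' || *p == *s))   /* rules 2 and 4: one character */
    return like_match(s + 1, p + 1);
  return 0;
}

The double recursion in the '%' case is exactly why the EvalLikePattern comment in the next hunk speaks of a recursive routine: every occurrence of the next substring-specifier must be tried, not only the first.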
@@ -572,7 +572,7 @@ bool EvalLikePattern(LPCSTR sp, LPCSTR tp)
  b = (t || !*sp); /* true if % or void strg. */
  else if (!t) {
  /*******************************************************************/
- /* No character to skip, check occurrence of <subtring-specifier> */
+ /* No character to skip, check occurrence of <substring-specifier>*/
  /* at the very beginning of remaining string. */
  /*******************************************************************/
  if (p) {

@@ -586,7 +586,7 @@ bool EvalLikePattern(LPCSTR sp, LPCSTR tp)
  if (p)
  /*****************************************************************/
  /* Here is the case explaining why we need a recursive routine. */
- /* The test must be done not only against the first occurrence */
+ /* The test must be done not only against the first occurrence */
  /* of the <substring-specifier> in the remaining string, */
  /* but also with all eventual succeeding ones. */
  /*****************************************************************/

@@ -1080,7 +1080,7 @@ DllExport PSZ GetIniString(PGLOBAL g, void *mp, LPCSTR sec, LPCSTR key,
  #endif // 0

  /***********************************************************************/
- /* GetAmName: return the name correponding to an AM code. */
+ /* GetAmName: return the name corresponding to an AM code. */
  /***********************************************************************/
  char *GetAmName(PGLOBAL g, AMT am, void *memp)
  {

@@ -171,7 +171,7 @@ void XMLNODE::Delete(PXNODE dnp)
  } // end of Delete

  /******************************************************************/
- /* Store a string in Buf, enventually reallocating it. */
+ /* Store a string in Buf, eventually reallocating it. */
  /******************************************************************/
  char *XMLNODE::BufAlloc(PGLOBAL g, const char *p, int n)
  {

@@ -446,7 +446,7 @@ int TABDEF::GetColCatInfo(PGLOBAL g)
  //case RECFM_OCCUR:
  //case RECFM_PRX:
  case RECFM_OEM:
- poff = 0; // Offset represents an independant flag
+ poff = 0; // Offset represents an independent flag
  break;
  default: // PLG ODBC JDBC MYSQL WMI...
  poff = 0; // NA

@@ -152,14 +152,14 @@ class DllExport OEMDEF : public TABDEF { /* OEM table */
  void *Hdll; /* Handle for the loaded shared library */
  #endif // !_WIN32
  PTABDEF Pxdef; /* Pointer to the external TABDEF class */
- char *Module; /* Path/Name of the DLL implenting it */
+ char *Module; /* Path/Name of the DLL implementing it */
  char *Subtype; /* The name of the OEM table sub type */
  }; // end of OEMDEF

  /***********************************************************************/
  /* Column definition block used during creation. */
  /***********************************************************************/
- class DllExport COLCRT : public BLOCK { /* Column description block */
+ class DllExport COLCRT : public BLOCK { /* Column description block */
  friend class TABDEF;
  public:
  COLCRT(PSZ name); // Constructor

@@ -186,7 +186,7 @@ bool DOSDEF::DefineAM(PGLOBAL g, LPCSTR am, int)
  } // end of DefineAM

  /***********************************************************************/
- /* Get the full path/name of the optization file. */
+ /* Get the full path/name of the optimization file. */
  /***********************************************************************/
  bool DOSDEF::GetOptFileName(PGLOBAL g, char *filename)
  {

@@ -210,7 +210,7 @@ bool DOSDEF::GetOptFileName(PGLOBAL g, char *filename)
  } // end of GetOptFileName

  /***********************************************************************/
- /* After an optimize error occurred, remove all set optimize values. */
+ /* After an optimize error occurred, remove all set optimize values. */
  /***********************************************************************/
  void DOSDEF::RemoveOptValues(PGLOBAL g)
  {

@@ -1619,7 +1619,7 @@ void TDBDOS::ResetBlockFilter(PGLOBAL g)
  /* RC_OK: if some records in the block can meet filter criteria. */
  /* RC_NF: if no record in the block can meet filter criteria. */
  /* RC_EF: if no record in the remaining file can meet filter criteria.*/
- /* In addition, temporarily supress filtering if all the records in */
+ /* In addition, temporarily suppress filtering if all the records in */
  /* the block meet filter criteria. */
  /***********************************************************************/
  int TDBDOS::TestBlock(PGLOBAL g)

@@ -1219,7 +1219,7 @@ PCOL TDBFMT::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)

  /***********************************************************************/
  /* FMT EstimatedLength. Returns an estimated minimum line length. */
- /* The big problem here is how can we astimated that minimum ? */
+ /* The big problem here is how can we estimate that minimum ? */
  /***********************************************************************/
  int TDBFMT::EstimatedLength(void)
  {

@@ -289,7 +289,7 @@ PTDB JDBCDEF::GetTable(PGLOBAL g, MODE m)
  /* containing the entire result of the executed query. This can be an */
  /* issue for big tables and memory error can occur. An alternative is */
  /* to use streaming (reading one row at a time) but to specify this, */
- /* a fech size of the integer min value must be send to the driver. */
+ /* a fetch size of the integer min value must be send to the driver. */
  /***********************************************************************/
  int JDBCPARM::CheckSize(int rows)
  {
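For context (an aside, not text from the commit): with MySQL's Connector/J driver the streaming mode alluded to here is normally requested by passing Integer.MIN_VALUE to Statement.setFetchSize(), which appears to be the "integer min value" the comment has in mind.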
|
@ -676,7 +676,7 @@ char* TDBDIR::Path(PGLOBAL g)
|
||||
PlugSetPath(Fpath, To_File, defp ? defp->GetPath() : NULL);
|
||||
_splitpath(Fpath, Drive, Direc, Fname, Ftype);
|
||||
} else
|
||||
_makepath(Fpath, Drive, Direc, Fname, Ftype); // Usefull for TDBSDR
|
||||
_makepath(Fpath, Drive, Direc, Fname, Ftype); // Useful for TDBSDR
|
||||
|
||||
return Fpath;
|
||||
#else // !_WIN32
|
||||
|
@ -265,7 +265,7 @@ PQRYRES PIVAID::MakePivotColumns(PGLOBAL g)
|
||||
ndif = qrp->Nblin;
|
||||
} // endif Tabsrc
|
||||
|
||||
// Allocate the Value used to retieve column names
|
||||
// Allocate the Value used to retrieve column names
|
||||
if (!(valp = AllocateValue(g, Rblkp->GetType(),
|
||||
Rblkp->GetVlen(),
|
||||
Rblkp->GetPrec())))
|
||||
|
@ -95,7 +95,7 @@ int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename)
|
||||
char fn[600];
|
||||
pid_t pID;
|
||||
|
||||
// Check if curl package is availabe by executing subprocess
|
||||
// Check if curl package is available by executing subprocess
|
||||
FILE *f= popen("command -v curl", "r");
|
||||
|
||||
if (!f) {
|
||||
|
@ -290,7 +290,7 @@ PCOL TDBVCT::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n)
|
||||
/***********************************************************************/
|
||||
bool TDBVCT::IsUsingTemp(PGLOBAL)
|
||||
{
|
||||
// For developpers
|
||||
// For developers
|
||||
return (UseTemp() == TMP_TEST);
|
||||
} // end of IsUsingTemp
|
||||
|
||||
|
@ -599,7 +599,7 @@ int TDBWMI::GetMaxSize(PGLOBAL g)
|
||||
/*******************************************************************/
|
||||
/* Loop enumerating to get the count. This is prone to last a */
|
||||
/* very long time for some classes such as DataFile, this is why */
|
||||
/* we just return an estimated value that will be ajusted later. */
|
||||
/* we just return an estimated value that will be adjusted later. */
|
||||
/*******************************************************************/
|
||||
MaxSize = Ems;
|
||||
#if 0
|
||||
@ -619,7 +619,7 @@ int TDBWMI::GetMaxSize(PGLOBAL g)
|
||||
break;
|
||||
|
||||
MaxSize++;
|
||||
} // endwile Enumerator
|
||||
} // endwhile Enumerator
|
||||
|
||||
Res = Enumerator->Reset();
|
||||
#endif // 0
|
||||
|
@ -382,7 +382,7 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
|
||||
xcp->Found = false;
|
||||
} // endfor xcp
|
||||
|
||||
} // endor i
|
||||
} // endfor i
|
||||
|
||||
txmp->CloseDB(g);
|
||||
|
||||
@ -818,7 +818,7 @@ int TDBXML::LoadTableFile(PGLOBAL g, char *filename)
|
||||
|
||||
/***********************************************************************/
|
||||
/* Initialize the processing of the XML file. */
|
||||
/* Note: this function can be called several times, eventally before */
|
||||
/* Note: this function can be called several times, eventually before */
|
||||
/* the columns are known (from TBL for instance) */
|
||||
/***********************************************************************/
|
||||
bool TDBXML::Initialize(PGLOBAL g)
|
||||
|
@ -53,8 +53,8 @@
|
||||
Oct-2009 - Mathias Svensson - Fixed problem if uncompressed size was > 4G and compressed size was <4G
|
||||
should only read the compressed/uncompressed size from the Zip64 format if
|
||||
the size from normal header was 0xFFFFFFFF
|
||||
Oct-2009 - Mathias Svensson - Applied some bug fixes from paches recived from Gilles Vollant
|
||||
Oct-2009 - Mathias Svensson - Applied support to unzip files with compression mathod BZIP2 (bzip2 lib is required)
|
||||
Oct-2009 - Mathias Svensson - Applied some bug fixes from patches received from Gilles Vollant
|
||||
Oct-2009 - Mathias Svensson - Applied support to unzip files with compression method BZIP2 (bzip2 lib is required)
|
||||
Patch created by Daniel Borca
|
||||
|
||||
Jan-2010 - back to unzip and minizip 1.0 name scheme, with compatibility layer
|
||||
@ -847,7 +847,7 @@ extern int ZEXPORT unzGetGlobalInfo (unzFile file, unz_global_info* pglobal_info
|
||||
return UNZ_OK;
|
||||
}
|
||||
/*
|
||||
Translate date/time from Dos format to tm_unz (readable more easilty)
|
||||
Translate date/time from Dos format to tm_unz (readable more easily)
|
||||
*/
|
||||
local void unz64local_DosDateToTmuDate (ZPOS64_T ulDosDate, tm_unz* ptm)
|
||||
{
|
||||
|
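For readers unfamiliar with the format being translated here, the DOS (FAT) date/time packing is fixed: the high 16 bits of ulDosDate hold the date (bits 15-9 year since 1980, 8-5 month, 4-0 day) and the low 16 bits hold the time (bits 15-11 hours, 10-5 minutes, 4-0 seconds divided by two). A standalone sketch of the decoding using standard <time.h> fields (an illustration, not the minizip code):

#include <time.h>

static void dos_date_to_tm(unsigned long dos, struct tm *t)
{
  unsigned date = (unsigned)(dos >> 16);        /* high word: date */
  t->tm_year = (int)((date >> 9) & 0x7f) + 80;  /* tm_year counts from 1900 */
  t->tm_mon  = (int)((date >> 5) & 0x0f) - 1;   /* tm_mon is 0-based */
  t->tm_mday = (int)(date & 0x1f);              /* day of month */
  t->tm_hour = (int)((dos >> 11) & 0x1f);       /* low word: time */
  t->tm_min  = (int)((dos >> 5) & 0x3f);
  t->tm_sec  = (int)(dos & 0x1f) * 2;           /* stored in 2-second units */
}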
@@ -20,8 +20,8 @@
  Implements the user_connect class.

  @details
- To support multi_threading, each query creates and use a PlugDB "user"
- that is a connection with its personnal memory allocation.
+ To support multi_threading, each query creates and uses a PlugDB "user"
+ that is a connection with its private memory allocation.

  @note
  Author Olivier Bertrand

@@ -1775,7 +1775,7 @@ DECVAL::DECVAL(PGLOBAL g, PSZ s, int n, int prec, bool uns)
  } // end of DECVAL constructor

  /***********************************************************************/
- /* DECIMAL: Check whether the numerica value is equal to 0. */
+ /* DECIMAL: Check whether the numerical value is equal to 0. */
  /***********************************************************************/
  bool DECVAL::IsZero(void)
  {

@@ -40,7 +40,7 @@ typedef struct index_val : public BLOCK {
  index_val(PXOB xp) {Next = NULL; Xval = xp; Kp = NULL;}
  PIVAL Next; // Next value
  PXOB Xval; // To value or array
- int *Kp; // The coordonates in a LSTBLK
+ int *Kp; // The coordinates in a LSTBLK
  } IVAL;

  typedef struct index_col : public BLOCK {

@@ -299,7 +299,7 @@ error:
  DESCRIPTION

  Read the meta-file info. For now we are only interested in
- rows counf, crashed bit and magic number.
+ rows count, crashed bit and magic number.

  RETURN
  0 - OK

@@ -1006,7 +1006,7 @@ int ha_tina::open(const char *name, int mode, uint open_options)


  /*
- Close a database file. We remove ourselves from the shared strucutre.
+ Close a database file. We remove ourselves from the shared structure.
  If it is empty we destroy it.
  */
  int ha_tina::close(void)

@@ -1292,7 +1292,7 @@ void ha_tina::position(const uchar *record)


  /*
- Used to fetch a row from a posiion stored with ::position().
+ Used to fetch a row from a position stored with ::position().
  my_get_ptr() retrieves the data for you.
  */

@@ -1397,7 +1397,7 @@ int ha_tina::rnd_end()

  /*
  The sort is needed when there were updates/deletes with random orders.
- It sorts so that we move the firts blocks to the beginning.
+ It sorts so that we move the first blocks to the beginning.
  */
  my_qsort(chain, (size_t)(chain_ptr - chain), sizeof(tina_set),
  (qsort_cmp)sort_set);

@@ -284,7 +284,7 @@
  -------

  There is a test for MySQL Federated Storage Handler in ./mysql-test/t,
- federatedd.test It starts both a slave and master database using
+ federated.test It starts both a slave and master database using
  the same setup that the replication tests use, with the exception that
  it turns off replication, and sets replication to ignore the test tables.
  After ensuring that you actually do have support for the federated storage

@@ -3268,7 +3268,7 @@ bool ha_federated::get_error_message(int error, String* buf)
  @details Call @c mysql_store_result() to save a result set then
  append it to the stored results array.

- @param[in] mysql_arg MySLQ connection structure.
+ @param[in] mysql_arg MySQL connection structure.

  @return Stored result set (MYSQL_RES object).
  */

@@ -198,7 +198,7 @@ public:
  }
  const key_map *keys_to_use_for_scanning() override { return &key_map_full; }
  /*
- Everything below are methods that we implment in ha_federated.cc.
+ Everything below are methods that we implement in ha_federated.cc.

  Most of these methods are not obligatory, skip them and
  MySQL will treat them as not implemented

@@ -24,8 +24,8 @@ static void init_block(HP_BLOCK *block, size_t reclength, ulong min_records,

  /*
  In how many parts are we going to do allocations of memory and indexes
- If we assigne 1M to the heap table memory, we will allocate roughly
- (1M/16) bytes per allocaiton
+ If we assign 1M to the heap table memory, we will allocate roughly
+ (1M/16) bytes per allocation
  */
  static const int heap_allocation_parts= 16;
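A quick check of the arithmetic in that comment (an illustrative example, not part of the commit): with a 1M heap-table memory budget and heap_allocation_parts= 16, each of the 16 parts comes out to roughly 1M/16 = 64K per allocation.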
@@ -361,7 +361,7 @@ static void init_block(HP_BLOCK *block, size_t reclength, ulong min_records,
  block->records_in_block= records_in_block;
  block->recbuffer= recbuffer;
  block->last_allocated= 0L;
- /* All alloctions are done with this size, if possible */
+ /* All allocations are done with this size, if possible */
  block->alloc_size= alloc_size - MALLOC_OVERHEAD;

  for (i= 0; i <= HP_MAX_LEVELS; i++)

@@ -375,7 +375,7 @@ ulong hp_rec_hashnr(register HP_KEYDEF *keydef, register const uchar *rec)

  RETURN
  0 Key is identical
- <> 0 Key differes
+ <> 0 Key differs
  */

  int hp_rec_key_cmp(HP_KEYDEF *keydef, const uchar *rec1, const uchar *rec2)

@@ -67,7 +67,7 @@ either in S (shared) or X (exclusive) mode and block->lock was not acquired on
  node pointer pages.

  After MariaDB 10.2.2, block->lock S-latch or X-latch is used to protect
- node pointer pages and obtaiment of node pointer page latches is protected by
+ node pointer pages and obtainment of node pointer page latches is protected by
  index->lock.

  (0) Definition: B-tree level.

@@ -130,7 +130,7 @@ NOTE: New rules after MariaDB 10.2.2 does not affect the latching rules of leaf

  index->lock S-latch is needed in read for the node pointer traversal. When the leaf
  level is reached, index-lock can be released (and with the MariaDB 10.2.2 changes, all
- node pointer latches). Left to right index travelsal in leaf page level can be safely done
+ node pointer latches). Left to right index traversal in leaf page level can be safely done
  by obtaining right sibling leaf page latch and then releasing the old page latch.

  Single leaf page modifications (BTR_MODIFY_LEAF) are protected by index->lock

@@ -5639,7 +5639,7 @@ static void btr_blob_free(buf_block_t *block, bool all, mtr_t *mtr)

  if (!buf_LRU_free_page(&block->page, all) && all && block->page.zip.data)
  /* Attempt to deallocate the redundant copy of the uncompressed page
- if the whole ROW_FORMAT=COMPRESSED block cannot be deallocted. */
+ if the whole ROW_FORMAT=COMPRESSED block cannot be deallocated. */
  buf_LRU_free_page(&block->page, false);

  mysql_mutex_unlock(&buf_pool.mutex);

@@ -971,7 +971,7 @@ ATTRIBUTE_COLD void buf_mem_pressure_shutdown()
  #if defined(DBUG_OFF) && defined(HAVE_MADVISE) && defined(MADV_DODUMP)
  /** Enable buffers to be dumped to core files

- A convience function, not called anyhwere directly however
+ A convenience function, not called anywhere directly however
  it is left available for gdb or any debugger to call
  in the event that you want all of the memory to be dumped
  to a core file.

@@ -2841,7 +2841,7 @@ loop:
  well as error handling takes place at a lower level.
  Here we only need to know whether the page really is
  corrupted, or if an encrypted page with a valid
- checksum cannot be decypted. */
+ checksum cannot be decrypted. */

  switch (dberr_t local_err = buf_read_page(page_id, chain)) {
  case DB_SUCCESS:

@@ -19,7 +19,7 @@ this program; if not, write to the Free Software Foundation, Inc.,

  /**************************************************//**
  @file buf/buf0dblwr.cc
- Doublwrite buffer module
+ Doublewrite buffer module

  Created 2011/12/19
  *******************************************************/

@@ -126,7 +126,7 @@ void buf_pool_t::page_cleaner_wakeup(bool for_LRU) noexcept

  /* if pct_lwm != 0.0, adaptive flushing is enabled.
  signal buf page cleaner thread
- - if pct_lwm <= dirty_pct then it will invoke apdative flushing flow
+ - if pct_lwm <= dirty_pct then it will invoke adaptive flushing flow
  - if pct_lwm > dirty_pct then it will invoke idle flushing flow.

  idle_flushing:
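Restated as code for clarity (an illustrative paraphrase of that comment, not the function's actual body; pct_lwm and dirty_pct are the percentages named above, and the helper names are invented):

if (pct_lwm != 0.0) {                        /* adaptive flushing enabled */
  if (pct_lwm <= dirty_pct)
    wake_cleaner_for_adaptive_flushing();    /* hypothetical helper */
  else
    wake_cleaner_for_idle_flushing();        /* hypothetical helper */
}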
@@ -2221,7 +2221,7 @@ static void buf_flush_sync_for_checkpoint(lsn_t lsn) noexcept
  mysql_mutex_unlock(&buf_pool.flush_list_mutex);
  }

- /** Check if the adpative flushing threshold is recommended based on
+ /** Check if the adaptive flushing threshold is recommended based on
  redo log capacity filled threshold.
  @param oldest_lsn buf_pool.get_oldest_modification()
  @return true if adaptive flushing is recommended. */

@@ -880,7 +880,7 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index,
  n_sample_pages = srv_stats_transient_sample_pages;
  }
  } else {
- /* New logaritmic number of pages that are estimated.
+ /* New logarithmic number of pages that are estimated.
  Number of pages estimated should be between 1 and
  index->stat_index_size.

@@ -456,7 +456,7 @@ static bool fil_node_open_file(fil_node_t *node, const byte *page, bool no_lsn)
  }
  }

- /* The node can be opened beween releasing and acquiring fil_system.mutex
+ /* The node can be opened between releasing and acquiring fil_system.mutex
  in the above code */
  return node->is_open() || fil_node_open_file_low(node, page, no_lsn);
  }

@@ -2162,7 +2162,7 @@ func_exit:
  df_remote.init(flags);

  /* Discover the correct file by looking in three possible locations
- while avoiding unecessary effort. */
+ while avoiding unnecessary effort. */

  /* We will always look for an ibd in the default location. */
  df_default.make_filepath(nullptr, name, IBD);

@@ -2501,7 +2501,7 @@ bool fil_crypt_check(fil_space_crypt_t *crypt_data, const char *f_name)
  /** Open an ibd tablespace and add it to the InnoDB data structures.
  This is similar to fil_ibd_open() except that it is used while processing
  the REDO log, so the data dictionary is not available and very little
- validation is done. The tablespace name is extracred from the
+ validation is done. The tablespace name is extracted from the
  dbname/tablename.ibd portion of the filename, which assumes that the file
  is a file-per-table tablespace. Any name will do for now. General
  tablespace names will be read from the dictionary after it has been

@@ -292,7 +292,7 @@ static ulint fil_page_compress_for_non_full_crc32(
  mach_write_to_2(out_buf + FIL_PAGE_TYPE, FIL_PAGE_PAGE_COMPRESSED);
  }

- /* Set up the actual payload lenght */
+ /* Set up the actual payload length */
  mach_write_to_2(out_buf + FIL_PAGE_DATA + FIL_PAGE_COMP_SIZE,
  write_size);

@@ -1535,7 +1535,7 @@ MY_ATTRIBUTE((nonnull(1,4,5), warn_unused_result))
  @param[out] block inode block
  @param[out] err error code
  @return segment inode, page x-latched
- @retrval nullptr if the inode is free or corruption was noticed */
+ @retval nullptr if the inode is free or corruption was noticed */
  static
  fseg_inode_t*
  fseg_inode_try_get(

@@ -2075,7 +2075,7 @@ take_hinted_page:
  }

  /** If the number of unused but reserved pages in a segment is
- esser than minimum value of 1/8 of reserved pages or
+ less than minimum value of 1/8 of reserved pages or
  4 * FSP_EXTENT_SIZE and there are at least half of extent size
  used pages, then we allow a new empty extent to be added to
  the segment in fseg_alloc_free_page_general(). Otherwise, we use

@@ -3306,7 +3306,7 @@ func_exit:
  {
  /* Update the FLST_LAST pointer in base node with current
  valid extent descriptor and mark the FIL_NULL as next in
- current extent descriptr */
+ current extent descriptor */
  flst_write_addr(
  *header,
  header->page.frame + hdr_offset + FLST_LAST,

@@ -953,9 +953,9 @@ SysTablespace::open_or_create(
  }
  }

- /* Close the curent handles, add space and file info to the
+ /* Close the current handles, add space and file info to the
  fil_system cache and the Data Dictionary, and re-open them
- in file_system cache so that they stay open until shutdown. */
+ in fil_system cache so that they stay open until shutdown. */
  mysql_mutex_lock(&fil_system.mutex);
  ulint node_counter = 0;
  for (files_t::iterator it = begin; it != end; ++it) {

@@ -688,7 +688,7 @@ fts_ast_visit(
  continue;
  }

- /* Process leaf node accroding to its pass.*/
+ /* Process leaf node according to its pass.*/
  if (oper == FTS_EXIST_SKIP
  && visit_pass == FTS_PASS_EXIST) {
  error = visitor(FTS_EXIST, node, arg);

@@ -177,7 +177,7 @@ static const char* fts_config_table_insert_values_sql =
  FTS_TABLE_STATE "', '0');\n"
  "END;\n";

- /** FTS tokenize parmameter for plugin parser */
+ /** FTS tokenize parameter for plugin parser */
  struct fts_tokenize_param_t {
  fts_doc_t* result_doc; /*!< Result doc for tokens */
  ulint add_pos; /*!< Added position for tokens */

@@ -2032,7 +2032,7 @@ fts_create_one_index_table(
  FTS_INDEX_DOC_COUNT_LEN);

  /* The precise type calculation is as follows:
- least signficiant byte: MySQL type code (not applicable for sys cols)
+ least significant byte: MySQL type code (not applicable for sys cols)
  second least : DATA_NOT_NULL | DATA_BINARY_TYPE
  third least : the MySQL charset-collation code (DATA_MTYPE_MAX) */

@@ -4423,7 +4423,7 @@ or greater than fts_max_token_size.
  @param[in] stopwords stopwords rb tree
  @param[in] cs token charset
  @retval true if it is not stopword and length in range
- @retval false if it is stopword or lenght not in range */
+ @retval false if it is stopword or length not in range */
  bool
  fts_check_token(
  const fts_string_t* token,

@@ -117,10 +117,10 @@ struct fts_zip_t {
  fts_string_t word; /*!< UTF-8 string */

  ulint max_words; /*!< maximum number of words to read
- in one pase */
+ in one pass */
  };

- /** Prepared statemets used during optimize */
+ /** Prepared statements used during optimize */
  struct fts_optimize_graph_t {
  /*!< Delete a word from FTS INDEX */
  que_t* delete_nodes_graph;

@@ -172,7 +172,7 @@ struct fts_optimize_t {
  ulint n_completed; /*!< Number of FTS indexes that have
  been optimized */
  ibool del_list_regenerated;
- /*!< BEING_DELETED list regenarated */
+ /*!< BEING_DELETED list regenerated */
  };

  /** Used by the optimize, to keep state during compacting nodes. */

@@ -2245,7 +2245,7 @@ fts_optimize_read_deleted_doc_id_snapshot(
  }

  /*********************************************************************//**
- Optimze all the FTS indexes, skipping those that have already been
+ Optimize all the FTS indexes, skipping those that have already been
  optimized, since the FTS auxiliary indexes are not guaranteed to be
  of the same cardinality.
  @return DB_SUCCESS if all OK */

@@ -199,7 +199,7 @@ struct fts_proximity_t {
  of the range */
  };

- /** The match positions and tokesn to match */
+ /** The match positions and tokens to match */
  struct fts_phrase_t {
  fts_phrase_t(const dict_table_t* table)
  :

@@ -244,14 +244,14 @@ struct fts_phrase_t {
  st_mysql_ftparser* parser;
  };

- /** Paramter passed to fts phrase match by parser */
+ /** Parameter passed to fts phrase match by parser */
  struct fts_phrase_param_t {
  fts_phrase_t* phrase; /*!< Match phrase instance */
  ulint token_index; /*!< Index of token to match next */
  mem_heap_t* heap; /*!< Heap for word processing */
  };

- /** For storing the frequncy of a word/term in a document */
+ /** For storing the frequency of a word/term in a document */
  struct fts_doc_freq_t {
  doc_id_t doc_id; /*!< Document id */
  ulint freq; /*!< Frequency of a word in a document */

@@ -433,7 +433,7 @@ fts_query_lcs(

  /* Traverse the table backwards, from the last row to the first and
  also from the last column to the first. We compute the smaller
- common subsequeces first, then use the caluclated values to determine
+ common subsequences first, then use the calculated values to determine
  the longest common subsequence. The result will be in TABLE[0][0]. */
  for (i = r; i >= 0; --i) {
  int j;
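For readers who want that backward dynamic program spelled out, here is a self-contained sketch (an illustration with invented names and an arbitrary size bound, not the InnoDB implementation): TABLE[i][j] holds the length of the longest common subsequence of the suffixes s1[i..r-1] and s2[j..c-1], so filling rows and columns from the high end downward leaves the full answer in TABLE[0][0].

enum { MAX_LEN = 64 };               /* illustrative bound */

static int lcs_backward(const char *s1, int r, const char *s2, int c,
                        int TABLE[MAX_LEN + 1][MAX_LEN + 1])
{
  for (int i = r; i >= 0; --i)
    for (int j = c; j >= 0; --j) {
      if (i == r || j == c)
        TABLE[i][j] = 0;                       /* empty suffix */
      else if (s1[i] == s2[j])
        TABLE[i][j] = TABLE[i + 1][j + 1] + 1; /* extend the match */
      else
        TABLE[i][j] = TABLE[i + 1][j] > TABLE[i][j + 1]
                    ? TABLE[i + 1][j] : TABLE[i][j + 1];
    }
  return TABLE[0][0];
}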
@@ -762,7 +762,7 @@ fts_query_remove_doc_id(
  }

  /*******************************************************************//**
- Find the doc id in the query set but not in the deleted set, artificialy
+ Find the doc id in the query set but not in the deleted set, artificially
  downgrade or upgrade its ranking by a value and make/initialize its ranking
  under or above its normal range 0 to 1. This is used for Boolean Search
  operator such as Negation operator, which makes word's contribution to the

@@ -822,7 +822,7 @@ fts_query_intersect_doc_id(
  2. 'a +b': docs match 'a' is in doc_ids, add doc into intersect
  if it matches 'b'. if the doc is also in doc_ids, then change the
  doc's rank, and add 'a' in doc's words.
- 3. '+a +b': docs matching '+a' is in doc_ids, add doc into intsersect
+ 3. '+a +b': docs matching '+a' is in doc_ids, add doc into intersect
  if it matches 'b' and it's in doc_ids.(multi_exist = true). */

  /* Check if the doc id is deleted and it's in our set */

@@ -1439,7 +1439,7 @@ fts_query_union(
  /* The size can't decrease. */
  ut_a(rbt_size(query->doc_ids) >= n_doc_ids);

- /* Calulate the number of doc ids that were added to
+ /* Calculate the number of doc ids that were added to
  the current doc id set. */
  if (query->doc_ids) {
  n_doc_ids = rbt_size(query->doc_ids) - n_doc_ids;

@@ -2688,7 +2688,7 @@ fts_query_phrase_split(
  cache->stopword_info.cached_stopword,
  query->fts_index_table.charset)) {
  /* Add the word to the RB tree so that we can
- calculate it's frequencey within a document. */
+ calculate its frequency within a document. */
  fts_query_add_word_freq(query, token);
  } else {
  ib_vector_pop(tokens);

@@ -3385,7 +3385,7 @@ fts_query_read_node(
  /* Start from 1 since the first column has been read by the caller.
  Also, we rely on the order of the columns projected, to filter
  out ilists that are out of range and we always want to read
- the doc_count irrespective of the suitablility of the row. */
+ the doc_count irrespective of the suitability of the row. */

  for (i = 1; exp && !skip; exp = que_node_get_next(exp), ++i) {

@@ -115,7 +115,7 @@ Parse an SQL string.
  que_t*
  fts_parse_sql(
  /*==========*/
- fts_table_t* fts_table, /*!< in: FTS auxiliarry table info */
+ fts_table_t* fts_table, /*!< in: FTS auxiliary table info */
  pars_info_t* info, /*!< in: info struct, or NULL */
  const char* sql) /*!< in: SQL string to evaluate */
  {

@@ -74,7 +74,7 @@ rtr_page_split_initialize_nodes(
  n_recs = ulint(page_get_n_recs(page)) + 1;

  /*We reserve 2 MBRs memory space for temp result of split
- algrithm. And plus the new mbr that need to insert, we
+ algorithm. And plus the new mbr that need to insert, we
  need (n_recs + 3)*MBR size for storing all MBRs.*/
  buf = static_cast<double*>(mem_heap_alloc(
  heap, DATA_MBR_LEN * (n_recs + 3)

@@ -277,7 +277,7 @@ rtr_update_mbr_field(
  ins_suc = false;

  /* Since btr_cur_update_alloc_zip could
- reorganize the page, we need to repositon
+ reorganize the page, we need to reposition
  cursor2. */
  if (cursor2) {
  cursor2->page_cur.rec =

@@ -1888,7 +1888,7 @@ Calculates MBR_AREA(a+b) - MBR_AREA(a)
  Note: when 'a' and 'b' objects are far from each other,
  the area increase can be really big, so this function
  can return 'inf' as a result.
- Return the area increaed. */
+ Return the area increased. */
  static double
  rtree_area_increase(
  const uchar* a, /*!< in: original mbr. */
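Spelled out (a reading of the comment above, not text from the commit): for two-dimensional MBRs the quantity computed is area(a+b) - area(a), where a+b is the minimal bounding rectangle enclosing both boxes and area(m) = (m.xmax - m.xmin) * (m.ymax - m.ymin). When a and b are far apart, the enclosing box's side lengths grow with the distance between them, which is why the double result can reach 'inf'.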
@@ -3243,7 +3243,7 @@ static bool innobase_query_caching_table_check_low(
  retrieval or storing into:

  (1) There should not be any locks on the table.
- (2) Someother trx shouldn't invalidate the cache before this
+ (2) Some other trx shouldn't invalidate the cache before this
  transaction started.
  (3) Read view shouldn't exist. If exists then the view
  low_limit_id should be greater than or equal to the transaction that

@@ -6819,7 +6819,7 @@ wsrep_store_key_val_for_row(
  if (true_len > key_len) {
  true_len = key_len;
  }
- /* cannot exceed max column lenght either, we may need to truncate
+ /* cannot exceed max column length either, we may need to truncate
  the stored value: */
  if (true_len > sizeof(sorted)) {
  true_len = sizeof(sorted);

@@ -8012,9 +8012,9 @@ func_exit:
  }

  /** Fill the update vector's "old_vrow" field for those non-updated,
- but indexed columns. Such columns could stil present in the virtual
+ but indexed columns. Such columns could still be present in the virtual
  index rec fields even if they are not updated (some other fields updated),
- so needs to be logged.
+ so they need to be logged.
  @param[in] prebuilt InnoDB prebuilt struct
  @param[in,out] vfield field to filled
  @param[in] o_len actual column length

@@ -10361,7 +10361,7 @@ ha_innobase::wsrep_append_keys(
  if (is_null0 != is_null1 ||
  len0 != len1 ||
  memcmp(key0, key1, len0)) {
- /* This key has chaged. If it
+ /* This key has changed. If it
  is unique, this is an exclusive
  operation -> upgrade key type */
  if (key_info->flags & HA_NOSAME) {

@@ -11065,7 +11065,7 @@ create_index(
  prefix, key_part->field is not the table's column (it's a
  "fake" field forged in open_table_from_share() with length
  equal to the length of the prefix); so we have to go to
- form->fied. */
+ form->field. */
  Field* field= form->field[key_part->field->field_index];
  if (field == NULL)
  ut_error;

@@ -514,10 +514,10 @@ protected:
  /** the size of upd_buf in bytes */
  ulint m_upd_buf_size;

- /** Flags that specificy the handler instance (table) capability. */
+ /** Flags that specify the handler instance (table) capability. */
  Table_flags m_int_table_flags;

- /** Index into the server's primkary keye meta-data table->key_info{} */
+ /** Index into the server's primary key meta-data table->key_info{} */
  uint m_primary_key;

  /** this is set to 1 when we are starting a table scan but have

@@ -532,7 +532,7 @@ protected:
  bool m_mysql_has_locked;

  /** If true, disable the Rowid Filter. It is disabled when
- the enigne is intialized for making rnd_pos() calls */
+ the engine is intialized for making rnd_pos() calls */
  bool m_disable_rowid_filter;
  };

@@ -948,7 +948,7 @@ my_error_innodb(dberr_t error, const char *table, ulint flags)
  }

  /** Get the name of an erroneous key.
- @param[in] error_key_num InnoDB number of the erroneus key
+ @param[in] error_key_num InnoDB number of the erroneous key
  @param[in] ha_alter_info changes that were being performed
  @param[in] table InnoDB table
  @return the name of the erroneous key */

@@ -1539,7 +1539,7 @@ static bool alter_options_need_rebuild(
  /* Specifying ROW_FORMAT or KEY_BLOCK_SIZE requires
  rebuilding the table. (These attributes in the .frm
  file may disagree with the InnoDB data dictionary, and
- the interpretation of thse attributes depends on
+ the interpretation of these attributes depends on
  InnoDB parameters. That is why we for now always
  require a rebuild when these attributes are specified.) */
  return true;

@@ -3150,7 +3150,7 @@ innobase_col_check_fk(
  }

  /** Check whether the foreign key constraint is on base of any stored columns.
- @param[in] foreign Foriegn key constraing information
+ @param[in] foreign Foreign key constraint information
  @param[in] table table to which the foreign key objects
  to be added
  @param[in] s_cols list of stored column information in the table.

@@ -8747,7 +8747,7 @@ found_col:
  DBUG_RETURN(true);
  }

- /* Check whether a columnn length change alter operation requires
+ /* Check whether a column length change alter operation requires
  to rebuild the template.
  @param[in] altered_table TABLE object for new version of table.
  @param[in] ha_alter_info Structure describing changes to be done

@@ -10128,7 +10128,7 @@ innobase_update_foreign_cache(
  } else {
  /* Drop the foreign key constraints if the
  table was not rebuilt. If the table is rebuilt,
- there would not be any foreign key contraints for
+ there would not be any foreign key constraints for
  it yet in the data dictionary cache. */
  for (ulint i = 0; i < ctx->num_to_drop_fk; i++) {
  dict_foreign_t* fk = ctx->drop_fk[i];

@@ -986,7 +986,7 @@ i_s_cmp_fill_low(
  mutex. Thus, some operation in page0zip.cc could
  increment a counter between the time we read it and
  clear it. We could introduce mutex protection, but it
- could cause a measureable performance hit in
+ could cause a measurable performance hit in
  page0zip.cc. */
  table->field[1]->store(zip_stat->compressed, true);
  table->field[2]->store(zip_stat->compressed_ok, true);

@@ -4304,7 +4304,7 @@ static int i_s_innodb_fill_buffer_lru(THD *thd, TABLE_LIST *tables, Item *)
  DBUG_RETURN(0);
  }

- /* Aquire the mutex before allocating info_buffer, since
+ /* Acquire the mutex before allocating info_buffer, since
  UT_LIST_GET_LEN(buf_pool.LRU) could change */
  mysql_mutex_lock(&buf_pool.mutex);

@@ -277,7 +277,7 @@ dtuple_set_n_fields(
  dtuple_t* tuple, /*!< in: tuple */
  ulint n_fields) /*!< in: number of fields */
  MY_ATTRIBUTE((nonnull));
- /** Copies a data tuple's virtaul fields to another. This is a shallow copy;
+ /** Copies a data tuple's virtual fields to another. This is a shallow copy;
  @param[in,out] d_tuple destination tuple
  @param[in] s_tuple source tuple */
  UNIV_INLINE

@@ -89,7 +89,7 @@ enum dberr_t {
  only happen when there are too many
  concurrent transactions */
  DB_UNSUPPORTED, /*!< when InnoDB sees any artefact or
- a feature that it can't recoginize or
+ a feature that it can't recognize or
  work with e.g., FT indexes created by
  a later version of the engine. */

@@ -135,7 +135,7 @@ enum dberr_t {
  decrypt operation failed because
  of missing key management plugin,
  or missing or incorrect key or
- incorret AES method or algorithm. */
+ incorrect AES method or algorithm. */

  DB_IO_ERROR = 100, /*!< Generic IO error */

@@ -257,7 +257,7 @@ enum dict_fld_sys_virtual_enum {
  };

  /* A number of the columns above occur in multiple tables. These are the
- length of thos fields. */
+ length of those fields. */
  #define DICT_FLD_LEN_SPACE 4
  #define DICT_FLD_LEN_FLAGS 4

@@ -1383,7 +1383,7 @@ public:
  inline void add(dict_table_t *table) noexcept;
  /** Remove a table definition from the data dictionary cache.
  @param[in,out] table cached table definition to be evicted
- @param[in] lru whether this is part of least-recently-used evictiono
+ @param[in] lru whether this is part of least-recently-used eviction
  @param[in] keep whether to keep (not free) the object */
  void remove(dict_table_t *table, bool lru= false, bool keep= false) noexcept;

@@ -285,8 +285,8 @@ index tables) of a FTS table are in HEX format. */
  (table->flags2 &= ~(flag) & ((1U << DICT_TF2_BITS) - 1))

  /** Tables could be chained together with Foreign key constraint. When
- first load the parent table, we would load all of its descedents.
- This could result in rescursive calls and out of stack error eventually.
+ first load the parent table, we would load all of its descendants.
+ This could result in recursive calls and out of stack error eventually.
  DICT_FK_MAX_RECURSIVE_LOAD defines the maximum number of recursive loads,
  when exceeded, the child table will not be loaded. It will be loaded when
  the foreign constraint check needs to be run. */

@@ -159,7 +159,7 @@ struct table_name_t
  Note: the spatial status is part of persistent undo log,
  so we should not modify the values in MySQL 5.7 */
  enum spatial_status_t {
- /* Unkown status (undo format in 5.7.9) */
+ /* Unknown status (undo format in 5.7.9) */
  SPATIAL_UNKNOWN = 0,

  /** Not used in gis index. */

@@ -46,7 +46,7 @@ void fil_crypt_threads_signal(bool broadcast= false);
  /**
  * CRYPT_SCHEME_UNENCRYPTED
  *
- * Used as intermediate state when convering a space from unencrypted
+ * Used as intermediate state when converting a space from unencrypted
  * to encrypted
  */
  /**

@@ -74,7 +74,7 @@ struct key_struct
  extern ulong srv_encrypt_tables;

  /** Mutex helper for crypt_data->scheme
- @param[in, out] schme encryption scheme
+ @param[in, out] scheme encryption scheme
  @param[in] exit should we exit or enter mutex ? */
  void
  crypt_data_scheme_locker(

@@ -389,7 +389,7 @@ private:
  /** Whether the tablespace is being imported */
  bool being_imported= false;

- /** Whether any corrupton of this tablespace has been reported */
+ /** Whether any corruption of this tablespace has been reported */
  mutable std::atomic_flag is_corrupted{false};

  public:

@@ -698,7 +698,7 @@ inline uint32_t fsp_flags_is_incompatible_mysql(uint32_t flags)
  {
  /*
  MySQL-8.0 SDI flag (bit 14),
- or MySQL 5.7 Encyption flag (bit 13)
+ or MySQL 5.7 Encryption flag (bit 13)
  */
  return flags >> 13 & 3;
  }
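In other words (a reading of the code above, not text from the commit): `flags >> 13 & 3` masks out exactly bits 13 and 14, so the function returns nonzero precisely when the MySQL 5.7 encryption flag or the MySQL 8.0 SDI flag is set, the two tablespace-flag formats the comment names as incompatible.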
@ -88,7 +88,7 @@ those defined in mysql file ft_global.h */
|
||||
|
||||
#define FTS_INDEX_TABLE_IND_NAME "FTS_INDEX_TABLE_IND"
|
||||
|
||||
/** The number of FTS index partitions for a fulltext idnex */
|
||||
/** The number of FTS index partitions for a fulltext index */
|
||||
#define FTS_NUM_AUX_INDEX 6
|
||||
|
||||
/** Threshold where our optimize thread automatically kicks in */
|
||||
|
@ -50,7 +50,7 @@ enum fts_table_state_enum {
|
||||
|
||||
typedef enum fts_table_state_enum fts_table_state_t;
|
||||
|
||||
/** The default time to wait for the background thread (in microsecnds). */
|
||||
/** The default time to wait for the background thread (in microseconds). */
|
||||
#define FTS_MAX_BACKGROUND_THREAD_WAIT 10000
|
||||
|
||||
/** Maximum number of iterations to wait before we complain */
|
||||
|
@ -79,7 +79,7 @@ struct fts_index_cache_t {
|
||||
CHARSET_INFO* charset; /*!< charset */
|
||||
};
|
||||
|
||||
/** Stop word control infotmation. */
|
||||
/** Stop word control information. */
|
||||
struct fts_stopword_t {
|
||||
ulint status; /*!< Status of the stopword tree */
|
||||
ib_alloc_t* heap; /*!< The memory allocator to use */
|
||||
|
@ -734,7 +734,7 @@ public:
|
||||
private:
|
||||
bool m_initialised;
|
||||
|
||||
/** mutex proteting the locks */
|
||||
/** mutex protecting the locks */
|
||||
alignas(CPU_LEVEL1_DCACHE_LINESIZE)
|
||||
IF_DBUG(srw_lock_debug,srw_spin_lock) latch;
|
||||
#ifdef SUX_LOCK_GENERIC
|
||||
|
@ -259,7 +259,7 @@ updated but the lock prevents insert of a user record to the end of
|
||||
the page.
|
||||
Next key locks will prevent the phantom problem where new rows
|
||||
could appear to SELECT result sets after the select operation has been
|
||||
performed. Prevention of phantoms ensures the serilizability of
|
||||
performed. Prevention of phantoms ensures the serializability of
|
||||
transactions.
|
||||
What should we check if an insert of a new record is wanted?
|
||||
Only the lock on the next record on the same page, because also the
|
||||
|
@ -257,7 +257,7 @@ struct ib_lock_t
|
||||
bool can_be_bypassed(bool has_s_lock_or_stronger) const noexcept
|
||||
{
|
||||
ut_ad(!is_table());
|
||||
/* We don't neet do check supremum bit in the lock's bitmap here,
|
||||
/* We don't need to check supremum bit in the lock's bitmap here,
|
||||
because the function is always called after checking for
|
||||
bypass_mode, which already contains check for supremum. */
|
||||
ut_ad(!is_insert_intention() || is_gap());
|
||||
|
@ -191,7 +191,7 @@ The end of the mini-transaction would be indicated by the end byte
|
||||
0x00 or 0x01; @see log_sys.get_sequence_bit().
|
||||
If log_sys.is_encrypted(), that is followed by 8 bytes of nonce
|
||||
(part of initialization vector). That will be followed by 4 bytes
|
||||
of CRC-32C of the entire mini-tranasction, excluding the end byte. */
|
||||
of CRC-32C of the entire mini-transaction, excluding the end byte. */
|
||||
|
||||
/** Redo log record types. These bit patterns (3 bits) will be written
|
||||
to the redo log file, so the existing codes or their interpretation on
|
||||
|
@ -35,7 +35,7 @@ Created 10/13/2010 Jimmy Yang
#include "btr0bulk.h"
#include "srv0srv.h"

/** This structure defineds information the scan thread will fetch
/** This structure defines information the scan thread will fetch
and put to the linked list for parallel tokenization/sort threads
to process */
typedef struct fts_doc_item fts_doc_item_t;

@ -649,7 +649,7 @@ struct row_prebuilt_t {
version is built in consistent read */
bool in_fts_query; /*!< Whether we are in a FTS query */
bool fts_doc_id_in_read_set; /*!< true if table has externally
defined FTS_DOC_ID coulmn. */
defined FTS_DOC_ID column. */
/*----------------------*/
ulonglong autoinc_last_value;
/*!< last value of AUTO-INC interval */

@ -46,7 +46,7 @@ row_quiesce_table_start(

/*********************************************************************//**
Set a table's quiesce state.
@return DB_SUCCESS or errro code. */
@return DB_SUCCESS or error code. */
dberr_t
row_quiesce_set_state(
/*==================*/

@ -71,7 +71,7 @@ is assigned to handle an undo log record in the chain of different versions
of the record, and the other thread happens to get the x-latch to the
clustered index record at the right time.
If a query thread notices that the clustered index record it is looking
for is missing, or the roll ptr field in the record doed not point to the
for is missing, or the roll ptr field in the record does not point to the
undo log record the thread was assigned to handle, then it gives up the undo
task for that undo log record, and fetches the next. This situation can occur
just in the case where the transaction modified the same record several times

@ -79,7 +79,7 @@ struct monitor_value_t {
monitor_running_t mon_status; /* whether monitor still running */
};

/** Follwoing defines are possible values for "monitor_type" field in
/** Following defines are possible values for "monitor_type" field in
"struct monitor_info" */
enum monitor_type_t {
MONITOR_NONE = 0, /*!< No monitoring */

@ -156,7 +156,7 @@ enum monitor_id_t {
MONITOR_OVLD_ROW_LOCK_WAIT,
MONITOR_OVLD_LOCK_AVG_WAIT_TIME,

/* Buffer and I/O realted counters. */
/* Buffer and I/O related counters. */
MONITOR_MODULE_BUFFER,
MONITOR_OVLD_BUFFER_POOL_SIZE,
MONITOR_OVLD_BUF_POOL_READS,

@ -432,11 +432,11 @@ counter option. */
(monitor_set_tbl[unsigned(monitor) / NUM_BITS_ULINT] & \
(ulint(1) << (unsigned(monitor) % NUM_BITS_ULINT)))

/** The actual monitor counter array that records each monintor counter
/** The actual monitor counter array that records each monitor counter
value */
extern monitor_value_t innodb_counter_value[NUM_MONITOR];

/** Following are macro defines for basic montior counter manipulations.
/** Following are macro defines for basic monitor counter manipulations.
Please note we do not provide any synchronization for these monitor
operations due to performance consideration. Most counters can
be placed under existing mutex protections in respective code

@ -679,7 +679,7 @@ is monotonically increasing, only max value needs to be updated */
} \
}

/** Some values such as log sequence number are montomically increasing
/** Some values such as log sequence number are monotonically increasing
number, do not need to record max/min values */
#define MONITOR_SET_SIMPLE(monitor, value) \
MONITOR_CHECK_DEFINED(value); \
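Read together, these monitor hunks describe a bitmap that switches counters on and off plus unsynchronized value updates. A self-contained sketch of that pattern follows; the names and sizes are assumptions for illustration, not the real monitor module:

#include <cstddef>
#include <cstdint>

constexpr size_t NUM_BITS_ULINT= 64;   /* assumes a 64-bit ulint */
constexpr size_t NUM_MONITOR= 256;     /* illustrative counter count */

uint64_t monitor_set_tbl[NUM_MONITOR / NUM_BITS_ULINT];
uint64_t counter_value[NUM_MONITOR];

/* The same idiom as MONITOR_IS_ON: word index, then bit within the word. */
inline bool monitor_is_on(unsigned monitor)
{
  return monitor_set_tbl[monitor / NUM_BITS_ULINT] &
         (uint64_t{1} << (monitor % NUM_BITS_ULINT));
}

/* Monotonic values such as an LSN need no min/max bookkeeping, and no
   locking is attempted either: a slightly stale value is acceptable
   for statistics, which is the performance trade-off noted above. */
inline void monitor_set_simple(unsigned monitor, uint64_t value)
{
  if (monitor_is_on(monitor))
    counter_value[monitor]= value;
}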
@ -150,7 +150,7 @@ extern mysql_mutex_t srv_monitor_file_mutex;
extern FILE* srv_monitor_file;
/** Mutex for locking srv_misc_tmpfile */
extern mysql_mutex_t srv_misc_tmpfile_mutex;
/* Temporary file for miscellanous diagnostic output */
/* Temporary file for miscellaneous diagnostic output */
extern FILE* srv_misc_tmpfile;

/* Server parameters which are read from the initfile */

@ -584,7 +584,7 @@ struct export_var_t{
my_bool innodb_buffer_pool_load_incomplete;/*!< Buf pool load incomplete */
ulint innodb_buffer_pool_pages_total; /*!< Buffer pool size */
ulint innodb_buffer_pool_bytes_data; /*!< File bytes used */
ulint innodb_buffer_pool_pages_misc; /*!< Miscellanous pages */
ulint innodb_buffer_pool_pages_misc; /*!< Miscellaneous pages */
#ifdef UNIV_DEBUG
ulint innodb_buffer_pool_pages_latched; /*!< Latched pages */
#endif /* UNIV_DEBUG */

@ -65,7 +65,7 @@ class purge_sys_t
{
public:
typedef std::vector<uint64_t, ut_allocator<uint64_t>> container_type;
/** Number of bits reseved to shift trx_no in purge queue element */
/** Number of bits reserved to shift trx_no in purge queue element */
static constexpr unsigned TRX_NO_SHIFT= 8;

bool empty() const { return m_array.empty(); }
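TRX_NO_SHIFT implies that each 64-bit queue element packs trx_no into the upper bits, keeping the low 8 bits for other data. A hypothetical sketch of that packing (field meanings assumed for illustration):

#include <cstdint>

constexpr unsigned TRX_NO_SHIFT= 8;

inline uint64_t purge_elem(uint64_t trx_no, uint8_t low_bits)
{ return (trx_no << TRX_NO_SHIFT) | low_bits; }   /* pack */

inline uint64_t elem_trx_no(uint64_t elem)
{ return elem >> TRX_NO_SHIFT; }                  /* unpack trx_no */

inline uint8_t elem_low_bits(uint64_t elem)
{ return uint8_t(elem & 0xFF); }                  /* unpack low bits */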
@ -1068,7 +1068,7 @@ public:
/**
Takes MVCC snapshot.

To reduce malloc probablility we reserve rw_trx_hash.size() + 32 elements
To reduce malloc probability we reserve rw_trx_hash.size() + 32 elements
in ids.

For details about get_rw_trx_hash_version() != get_max_trx_id() spin

@ -117,7 +117,7 @@ Initializes the base node of a two-way list.
}

/** Functor for accessing the embedded node within a list element. This is
required because some lists can have the node emebedded inside a nested
required because some lists can have the node embedded inside a nested
struct/union. See lock0priv.h (table locks) for an example. It provides a
specialised functor to grant access to the list node. */
template <typename Type>
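A minimal sketch of such a functor, with made-up types standing in for the lock0priv.h definitions:

template <typename T> struct list_node
{
  T *prev;
  T *next;
};

/* A list element whose node is buried inside a nested struct, as with
   InnoDB table locks; names here are illustrative only. */
struct lock_t
{
  int mode;
  struct
  {
    list_node<lock_t> locks;   /* the embedded list node */
  } un_member;
};

/* The functor that generic list code invokes to reach the node,
   keeping it unaware of the enclosing layout. */
struct TableLockGetNode
{
  list_node<lock_t> &operator()(lock_t &lock) const
  { return lock.un_member.locks; }
};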
@ -204,7 +204,7 @@ private:
/** Upper limit of used space */
Element* m_last;

/** Priority queue ordered on the pointer addresse. */
/** Priority queue ordered on the pointer addresses. */
pqueue_t m_pqueue;

/** Lock strategy to use */

@ -843,7 +843,7 @@ lock_rec_has_to_wait(
/* If the upper server layer has already decided on the
commit order between the transaction requesting the
lock and the transaction owning the lock, we do not
need to wait for gap locks. Such ordeering by the upper
need to wait for gap locks. Such ordering by the upper
server layer happens in parallel replication, where the
commit order is fixed to match the original order on the
master.

@ -2747,7 +2747,7 @@ lock_rec_inherit_to_gap(hash_cell_t &heir_cell, const page_id_t heir,
not create bogus gap locks for non-gap locks for READ UNCOMMITTED and
READ COMMITTED isolation levels. LOCK_ORDINARY and
LOCK_GAP require a gap before the record to be locked, that is why
setting lock on supremmum is necessary. */
setting lock on supremum is necessary. */
((!from_split || !lock->is_record_not_gap()) &&
lock->mode() != (lock_trx->duplicates ? LOCK_S : LOCK_X))))
{

@ -4172,7 +4172,7 @@ void lock_table_resurrect(dict_table_t *table, trx_t *trx, lock_mode mode)

{
/* This is executed at server startup while no connections
are alowed. Do not bother with lock elision. */
are allowed. Do not bother with lock elision. */
LockMutexGuard g{SRW_LOCK_CALL};
ut_ad(!lock_table_other_has_incompatible(trx, LOCK_WAIT, table, mode));

@ -4714,7 +4714,7 @@ void lock_release_on_drop(trx_t *trx)

/** Reset a lock bit and rebuild waiting queue.
@param cell rec hash cell of in_lock
@param lock the lock with supemum bit set */
@param lock the lock with supremum bit set */
static void lock_rec_unlock(hash_cell_t &cell, lock_t *lock, ulint heap_no)
{
ut_ad(lock_rec_get_nth_bit(lock, heap_no));

@ -4899,7 +4899,7 @@ reiterate:
lock_sys.rd_unlock();
trx->mutex_unlock();
mtr.start();
/* The curr thread is asociated with trx, which was just
/* The curr thread is associated with trx, which was just
moved to XA PREPARE state. Other threads may not modify the
existing lock objects of trx; they may only create new ones
in lock_rec_convert_impl_to_expl() or lock_rec_move(). */

@ -5327,7 +5327,7 @@ static ulint lock_get_n_rec_locks()

/*********************************************************************//**
Prints info of locks for all transactions.
@return FALSE if not able to acquire lock_sys.latch (and dislay info) */
@return FALSE if not able to acquire lock_sys.latch (and display info) */
ibool
lock_print_info_summary(
/*====================*/

@ -7330,7 +7330,7 @@ and less modified rows. Bit 0 is used to prefer orig_trx in case of a tie.
ut_ad(victim->state == TRX_STATE_ACTIVE);

/* victim->lock.was_chosen_as_deadlock_victim must always be set before
releasing waiting locks and reseting trx->lock.wait_lock */
releasing waiting locks and resetting trx->lock.wait_lock */
victim->lock.was_chosen_as_deadlock_victim= true;
DEBUG_SYNC_C("deadlock_report_before_lock_releasing");
lock_cancel_waiting_and_release<true>(victim->lock.wait_lock);

@ -257,7 +257,7 @@ lock_prdt_has_lock(
lock);

/* if the lock predicate operator is the same
as the one to look, and prdicate test is successful,
as the one to look, and predicate test is successful,
then we find a lock */
if (cur_prdt->op == prdt->op
&& lock_prdt_consistent(cur_prdt, prdt, 0)) {

@ -300,7 +300,7 @@ page_corrupted:
if (UNIV_UNLIKELY(block.page.id().page_no() < 3 ||
block.page.zip.ssize))
goto record_corrupted;
static_assert(INIT_ROW_FORMAT_REDUNDANT == 0, "compatiblity");
static_assert(INIT_ROW_FORMAT_REDUNDANT == 0, "compatibility");
static_assert(INIT_ROW_FORMAT_DYNAMIC == 1, "compatibility");
if (UNIV_UNLIKELY(!rlen))
goto record_corrupted;

@ -680,7 +680,7 @@ static struct
p.first->second.lsn= lsn;
p.first->second.file_name= defer.file_name;
}
/* Add the newly added defered space and change the file name */
/* Add the newly added deferred space and change the file name */
recv_spaces_t::iterator it{recv_spaces.find(space)};
if (it != recv_spaces.end())
it->second.name = defer.file_name;

@ -19,7 +19,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
The group commit synchronization used in log_write_up_to()
works as follows

For simplicity, lets consider only write operation,synchronozation of
For simplicity, lets consider only write operation,synchronization of
flush operation works the same.

Rules of the game

@ -42,17 +42,17 @@ Fixes a) but burns CPU unnecessary.

c) Mutex / condition variable combo.

Condtion variable notifies (broadcast) all waiters, whenever
Condition variable notifies (broadcast) all waiters, whenever
last written lsn is changed.

Has a disadvantage of many suprious wakeups, stress on OS scheduler,
Has a disadvantage of many spurious wakeups, stress on OS scheduler,
and mutex contention.

d) Something else.
Make use of the waiter's lsn parameter, and only wakeup "right" waiting
threads.

We chose d). Even if implementation is more complicated than alternatves
We chose d). Even if implementation is more complicated than alternatives
due to the need to maintain list of waiters, it provides the best performance.

See group_commit_lock implementation for details.
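A toy model of alternative d) is sketched below: every waiter registers the lsn it needs on a list, and the completer wakes only those whose lsn has already been written. This assumes stack-allocated waiter slots and differs from the real group_commit_lock in detail:

#include <condition_variable>
#include <cstdint>
#include <list>
#include <mutex>

class write_waiters
{
  struct waiter
  {
    uint64_t lsn;
    std::condition_variable cv;
    bool done= false;
  };
  std::mutex mtx;
  std::list<waiter*> waiting;
  uint64_t written_lsn= 0;

public:
  void wait_until_written(uint64_t lsn)
  {
    std::unique_lock<std::mutex> lk(mtx);
    if (written_lsn >= lsn)
      return;
    waiter w;
    w.lsn= lsn;
    waiting.push_back(&w);
    /* No broadcast to everyone: we sleep until our own slot is marked. */
    w.cv.wait(lk, [&w] { return w.done; });
  }

  void write_completed(uint64_t lsn)
  {
    std::unique_lock<std::mutex> lk(mtx);
    written_lsn= lsn;
    for (auto it= waiting.begin(); it != waiting.end(); )
    {
      if ((*it)->lsn <= lsn)
      {
        (*it)->done= true;
        (*it)->cv.notify_one();   /* wake only the "right" threads */
        it= waiting.erase(it);
      }
      else
        ++it;
    }
  }
};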
@ -3251,7 +3251,7 @@ more concurrent threads via thread_group setting.

@param[in] n_reader_threads - max number of concurrently
executing read callbacks
@param[in] n_writer_thread - max number of cuncurrently
@param[in] n_writer_thread - max number of concurrently
executing write callbacks
@return 0 for success, !=0 for error.
*/

@ -2205,7 +2205,7 @@ wrong_page_type:
int ret = cmp_rec_rec(
rec, old_rec, offsets, old_offsets, index);

/* For spatial index, on nonleaf leavel, we
/* For spatial index, on nonleaf level, we
allow recs to be equal. */
if (ret <= 0 && !(ret == 0 && index->is_spatial()
&& !page_is_leaf(page))) {

@ -411,7 +411,7 @@ opt_calc_index_goodness(

/*******************************************************************//**
Calculates the number of matched fields based on an index goodness.
@return number of excatly or partially matched fields */
@return number of exactly or partially matched fields */
UNIV_INLINE
ulint
opt_calc_n_fields_from_goodness(

@ -121,7 +121,7 @@ created. Thus we can easily see if this record was changed by the
creating transaction. Because we already have clustered record we can
access roll_ptr. Using this roll_ptr we can fetch undo record.
We can now check that undo_no of the undo record is less than undo_no of the
trancaction which created a view when cursor was created. We see this
transaction which created a view when cursor was created. We see this
clustered record only in case when record undo_no is less than undo_no
in the view. If this is not true we build based on undo_rec previous
version of the record. This record is found because purge can't remove

@ -504,7 +504,7 @@ row_merge_fts_doc_tokenize(
row_merge_fts_doc_tokenize_by_parser(doc,
parser, t_ctx);

/* Just indictate we have parsed all the word */
/* Just indicate that we have parsed all words */
t_ctx->processed_len += 1;
}

@ -593,7 +593,7 @@ row_merge_fts_doc_tokenize(
variable-length column is less than 128 bytes or the
maximum length is less than 256 bytes. */

/* One variable length column, word with its lenght less than
/* One variable length column, word with its length less than
fts_max_token_size, add one extra size and one extra byte.

Since the max length for FTS token now is larger than 255,

@ -1276,7 +1276,7 @@ row_fts_insert_tuple(
ulint num_item;

/* Getting a new word, flush the last position info
for the currnt word in fts_node */
for the current word in fts_node */
if (ib_vector_size(positions) > 0) {
fts_cache_node_add_positions(
NULL, fts_node, *in_doc_id, positions);

@ -69,7 +69,7 @@ struct row_stats_t {
found in the index */

ulint m_n_purged; /*!< Number of records purged
optimisatically */
optimistically */

ulint m_n_rows; /*!< Number of rows */

@ -269,7 +269,7 @@ struct row_import {
dict_col_t* m_cols; /*!< Column data */

byte** m_col_names; /*!< Column names, we store the
column naems separately becuase
column names separately because
there is no field to store the
value in dict_col_t */

@ -385,7 +385,7 @@ public:

/** Class that purges delete marked records from indexes, both secondary
and cluster. It does a pessimistic delete. This should only be done if we
couldn't purge the delete marked reocrds during Phase I. */
couldn't purge the delete marked records during Phase I. */
class IndexPurge {
public:
/** Constructor

@ -1007,7 +1007,7 @@ private:
rec_t* rec,
const rec_offs* offsets) UNIV_NOTHROW;

/** In the clustered index, adjist the BLOB pointers as needed.
/** In the clustered index, adjust the BLOB pointers as needed.
Also update the BLOB reference, write the new space id.
@param rec record to update
@param offsets column offsets for the record

@ -1135,7 +1135,7 @@ row_import::get_n_rows(
return(index->m_stats.m_n_rows);
}

/** Get the number of rows for which purge failed uding the convert phase.
/** Get the number of rows for which purge failed during the convert phase.
@param name index name
@return number of rows for which purge failed. */
ulint

@ -4729,7 +4729,7 @@ row_import_for_mysql(
trx_t* trx = prebuilt->trx;

/* The caller assured that this is not read_only_mode and that no
temorary tablespace is being imported. */
temporary tablespace is being imported. */
ut_ad(!srv_read_only_mode);
ut_ad(!table->is_temporary());

@ -2334,7 +2334,7 @@ row_ins_duplicate_error_in_clust(
/* NOTE: For unique non-clustered indexes there may be any number
of delete marked records with the same value for the non-clustered
index key (remember multiversioning), and which differ only in
the row refererence part of the index record, containing the
the row reference part of the index record, containing the
clustered index key fields. For such a secondary index record,
to avoid race condition, we must FIRST do the insertion and after
that check that the uniqueness condition is not breached! */

@ -829,7 +829,7 @@ error:

if (fixed_len) {
#ifdef UNIV_DEBUG
/* len should be between size calcualted base on
/* len should be between size calculated based on
mbmaxlen and mbminlen */
ut_ad(len <= fixed_len);
ut_ad(!col->mbmaxlen || len >= col->mbminlen

@ -4337,8 +4337,8 @@ void row_merge_drop_temp_indexes()
}

/** Create temporary merge files in the given paramater path, and if
UNIV_PFS_IO defined, register the file descriptor with Performance Schema.
/** Create temporary merge files in the given parameter path, and if
UNIV_PFS_IO is defined, register the file descriptor with Performance Schema.
@param[in] path location for creating temporary merge files, or NULL
@return File descriptor */
static pfs_os_file_t row_merge_file_create_mode(const char *path, int mode)

@ -782,7 +782,7 @@ row_create_prebuilt(

/* Maximum size of the buffer needed for conversion of INTs from
little endian format to big endian format in an index. An index
can have maximum 16 columns (MAX_REF_PARTS) in it. Therfore
can have maximum 16 columns (MAX_REF_PARTS) in it. Therefore
Max size for PK: 16 * 8 bytes (BIGINT's size) = 128 bytes
Max size Secondary index: 16 * 8 bytes + PK = 256 bytes. */
#define MAX_SRCH_KEY_VAL_BUFFER 2* (8 * MAX_REF_PARTS)
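The arithmetic in that comment can be checked at compile time; a hypothetical assertion, using the 8-byte BIGINTs and MAX_REF_PARTS = 16 stated above:

/* 2 * (8 bytes per BIGINT * 16 key parts) = 256 bytes:
   128 for the PK fields plus 128 for the secondary key fields. */
static_assert(2 * (8 * 16) == 256, "MAX_SRCH_KEY_VAL_BUFFER arithmetic");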
@ -1821,7 +1821,7 @@ void thd_get_query_start_data(THD *thd, char *buf);

This is used in UPDATE CASCADE/SET NULL of a system versioned referenced table.

node->historical_row: dtuple_t containing pointers of row changed by refertial
node->historical_row: dtuple_t containing pointers of row changed by referential
action.

@param[in] thr current query thread

@ -2132,7 +2132,7 @@ row_create_index_for_mysql(

/* For temp-table we avoid insertion into SYSTEM TABLES to
maintain performance and so we have separate path that directly
just updates dictonary cache. */
just updates dictionary cache. */
if (!table->is_temporary()) {
ut_ad(trx->state == TRX_STATE_ACTIVE);
ut_ad(trx->dict_operation);

@ -1647,7 +1647,7 @@ row_purge_step(

#ifdef UNIV_DEBUG
/***********************************************************//**
Validate the persisent cursor. The purge node has two references
Validate the persistent cursor. The purge node has two references
to the clustered index record - one via the ref member, and the
other via the persistent cursor. These two references must match
each other if the found_clust flag is set.

@ -4509,8 +4509,8 @@ early_not_found:
}
}

/* We don't support sequencial scan for Rtree index, because it
is no meaning to do so. */
/* We don't support sequential scan for Rtree index because it
is pointless. */
if (dict_index_is_spatial(index) && !RTREE_SEARCH_MODE(mode)) {
trx->op_info = "";
DBUG_RETURN(DB_END_OF_INDEX);

@ -4731,7 +4731,7 @@ wait_table_again:
if (UNIV_LIKELY(direction != 0)) {
if (spatial_search) {
/* R-Tree access does not need to do
cursor position and resposition */
cursor position and reposition */
goto next_rec;
}

@ -402,8 +402,8 @@ row_vers_impl_x_locked(
const rec_t* clust_rec;
dict_index_t* clust_index;

/* The function must not be invoked under lock_sys latch to prevert
latching orded violation, i.e. page latch must be acquired before
/* The function must not be invoked under lock_sys latch to prevent
latching order violation, i.e. page latch must be acquired before
lock_sys latch */
lock_sys.assert_unlocked();
/* The current function can be called from lock_rec_unlock_unmodified()

@ -1582,7 +1582,7 @@ srv_mon_process_existing_counter(
& MONITOR_DISPLAY_CURRENT) {
MONITOR_SET(monitor_id, value);
} else {
/* Most status counters are montonically
/* Most status counters are monotonically
increasing, no need to update their
minimum values. Only do so
if "update_min" set to TRUE */
Some files were not shown because too many files have changed in this diff