
MDEV-10963 Fragmented BINLOG query

The problem was originally stated in
  http://bugs.mysql.com/bug.php?id=82212
The size of a base64-encoded Rows_log_event exceeds its
vanilla byte representation by about a factor of 4/3.
When a binlogged event is about 1GB in size, mysqlbinlog generates
a BINLOG query that cannot be sent out due to its size.
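
For scale, a back-of-the-envelope sketch of that growth (illustration only,
not code from this patch; the 1GB figure is the usual @@max_allowed_packet
ceiling):

    // rough size check, assuming plain base64: 4 output chars per 3 input
    // bytes, ignoring line breaks and the trailing NUL
    #include <cstddef>
    #include <cstdio>

    static size_t base64_encoded_size(size_t n) { return ((n + 2) / 3) * 4; }

    int main()
    {
      const size_t event_size= 1024ULL * 1024 * 1024;   // a ~1GB row event
      // prints 1431655768, i.e. ~1.33GB -- already past a 1GB packet limit
      std::printf("%zu\n", base64_encoded_size(event_size));
      return 0;
    }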

It is fixed by fragmenting the BINLOG argument C-string into
(approximate) halves when the base64-encoded event is over 1GB in size.
In that case mysqlbinlog puts out

    SET @binlog_fragment_0='base64-encoded-fragment_0';
    SET @binlog_fragment_1='base64-encoded-fragment_1';
    BINLOG @binlog_fragment_0, @binlog_fragment_1;

to represent one big BINLOG statement.
For prompt memory release, the BINLOG handler is made to reset the BINLOG
argument user variables in the middle of processing, as if
@binlog_fragment_{0,1} = NULL were assigned.

Notice that two fragments are enough, though the client and server may
still need to tweak their @@max_allowed_packet to accommodate the fragment
size (which they would have to do anyway with a greater number of
fragments, should that be desired).
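
A minimal sketch of the splitting idea, using a hypothetical
emit_fragmented_binlog() helper rather than the actual mysqlbinlog code:

    // hypothetical helper (not the patch's code): split an already
    // base64-encoded event into two roughly equal fragments and wrap them
    // into the SET/BINLOG statements shown above
    #include <cstddef>
    #include <cstdio>

    static void emit_fragmented_binlog(FILE *out, const char *b64, size_t len)
    {
      size_t half= len / 2;                  // approximate halves
      // the (int) casts assume each fragment fits the %.*s precision range
      std::fprintf(out, "SET @binlog_fragment_0='%.*s';\n", (int) half, b64);
      std::fprintf(out, "SET @binlog_fragment_1='%.*s';\n",
                   (int) (len - half), b64 + half);
      std::fprintf(out, "BINLOG @binlog_fragment_0, @binlog_fragment_1;\n");
    }

On the server side the two user variables are glued back together (see
binlog_defragment() in the diff below) before base64 decoding.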

On the lower level the following changes are made:

Log_event::print_base64()
  still calls the encoder and stores the encoded data into a cache, but
  now *without* doing any formatting. The latter is left for the time
  when the cache is copied to an output file (e.g. the mysqlbinlog output).
  The no-formatting behavior is also reflected by the changed meaning of
  the last argument, which now specifies whether to cache the encoded data.

Rows_log_event::print_helper()
  is made to invoke a specialized fragmenting cache-to-file copying
  function, which is

copy_cache_to_file_wrapped()
  that takes care of fragmenting and also, optionally, wraps the encoded
  strings (fragments) into SQL stanzas.

my_b_copy_to_file()
  is refactored into my_b_copy_all_to_file(). The former function is
  generalized to accept a limit argument that constrains the copying, and
  it no longer reinitializes the cache into reading mode. The limit has
  no effect on a fully read cache.
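
A rough sketch of that layering; the signatures and helper calls are
approximations of the mysys API as described above, not the authoritative
code from the patch:

    #include "my_global.h"   /* MY_MIN, SIZE_T_MAX (assumed available here) */
    #include "my_sys.h"      /* IO_CACHE, my_b_fill, my_fwrite, reinit_io_cache */

    /* sketch only: copy at most `count` bytes from the cache's read buffer,
       refilling as needed; no reinit of the cache happens here anymore */
    int my_b_copy_to_file(IO_CACHE *cache, FILE *file, size_t count)
    {
      size_t bytes_in_cache= my_b_bytes_in_cache(cache);
      do
      {
        size_t chunk= MY_MIN(bytes_in_cache, count);
        if (my_fwrite(file, cache->read_pos, chunk,
                      MYF(MY_WME | MY_NABP)) == (size_t) -1)
          return 1;
        cache->read_pos+= chunk;
        count-= chunk;
      } while (count && (bytes_in_cache= my_b_fill(cache)));
      return 0;
    }

    /* sketch only: the reinit into reading mode now lives in the "all"
       variant, which then drains the whole cache via the limited copy */
    int my_b_copy_all_to_file(IO_CACHE *cache, FILE *file)
    {
      if (reinit_io_cache(cache, READ_CACHE, 0L, FALSE, FALSE))
        return 1;
      return my_b_copy_to_file(cache, file, SIZE_T_MAX);
    }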
Andrei Elkin
2018-07-27 22:55:18 +03:00
parent f9ac7032cb
commit 5d48ea7d07
16 changed files with 510 additions and 67 deletions


@@ -28,6 +28,65 @@
// START_EVENT_V3,
// Log_event_type,
// Log_event

/**
  Copy fragments into the standard placeholder thd->lex->comment.str.

  Compute the size of the (still) encoded total,
  allocate and then copy fragments one after another.
  The size can exceed max(max_allowed_packet) which is not a
  problem as no String instance is created off this char array.

  @param thd THD handle
  @return
    0 at success,
    -1 otherwise.
*/
int binlog_defragment(THD *thd)
{
  user_var_entry *entry[2];
  LEX_STRING name[2]= { thd->lex->comment, thd->lex->ident };

  /* compute the total size */
  thd->lex->comment.str= NULL;
  thd->lex->comment.length= 0;
  for (uint k= 0; k < 2; k++)
  {
    entry[k]=
      (user_var_entry*) my_hash_search(&thd->user_vars, (uchar*) name[k].str,
                                       name[k].length);
    if (!entry[k] || entry[k]->type != STRING_RESULT)
    {
      my_error(ER_WRONG_TYPE_FOR_VAR, MYF(0), name[k].str);
      return -1;
    }
    thd->lex->comment.length += entry[k]->length;
  }

  thd->lex->comment.str=                        // to be freed by the caller
    (char *) my_malloc(thd->lex->comment.length, MYF(MY_WME));
  if (!thd->lex->comment.str)
  {
    my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1);
    return -1;
  }

  /* fragments are merged into the allocated buf while the user vars get reset */
  size_t gathered_length= 0;
  for (uint k=0; k < 2; k++)
  {
    memcpy(thd->lex->comment.str + gathered_length, entry[k]->value,
           entry[k]->length);
    gathered_length += entry[k]->length;
    update_hash(entry[k], true, NULL, 0, STRING_RESULT, &my_charset_bin, 0);
  }
  DBUG_ASSERT(gathered_length == thd->lex->comment.length);

  return 0;
}

/**
  Execute a BINLOG statement.
@@ -53,14 +112,6 @@ void mysql_client_binlog_statement(THD* thd)
  if (check_global_access(thd, SUPER_ACL))
    DBUG_VOID_RETURN;

  size_t coded_len= thd->lex->comment.length;
  if (!coded_len)
  {
    my_error(ER_SYNTAX_ERROR, MYF(0));
    DBUG_VOID_RETURN;
  }
  size_t decoded_len= base64_needed_decoded_length(coded_len);

  /*
    option_bits will be changed when applying the event. But we don't expect
    it be changed permanently after BINLOG statement, so backup it first.
@@ -81,6 +132,8 @@ void mysql_client_binlog_statement(THD* thd)
  int err;
  Relay_log_info *rli;
  rpl_group_info *rgi;
  char *buf= NULL;
  size_t coded_len= 0, decoded_len= 0;

  rli= thd->rli_fake;
  if (!rli)
@@ -102,15 +155,13 @@ void mysql_client_binlog_statement(THD* thd)
  rgi->thd= thd;

  const char *error= 0;
  char *buf= (char *) my_malloc(decoded_len, MYF(MY_WME));
  Log_event *ev = 0;
  my_bool is_fragmented= FALSE;

  /*
    Out of memory check
  */
  if (!(rli &&
        rli->relay_log.description_event_for_exec &&
        buf))
  if (!(rli && rli->relay_log.description_event_for_exec))
  {
    my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1); /* needed 1 bytes */
    goto end;
@@ -119,6 +170,23 @@ void mysql_client_binlog_statement(THD* thd)
  rli->sql_driver_thd= thd;
  rli->no_storage= TRUE;

  if (unlikely(is_fragmented= thd->lex->comment.str && thd->lex->ident.str))
    if (binlog_defragment(thd))
      goto end;

  if (!(coded_len= thd->lex->comment.length))
  {
    my_error(ER_SYNTAX_ERROR, MYF(0));
    goto end;
  }

  decoded_len= base64_needed_decoded_length(coded_len);
  if (!(buf= (char *) my_malloc(decoded_len, MYF(MY_WME))))
  {
    my_error(ER_OUTOFMEMORY, MYF(ME_FATALERROR), 1);
    goto end;
  }

  for (char const *strptr= thd->lex->comment.str ;
       strptr < thd->lex->comment.str + thd->lex->comment.length ; )
  {
@@ -272,6 +340,8 @@ void mysql_client_binlog_statement(THD* thd)
  my_ok(thd);

end:
  if (unlikely(is_fragmented))
    my_free(thd->lex->comment.str);
  thd->variables.option_bits= thd_options;
  rgi->slave_close_thread_tables(thd);
  my_free(buf);