Mirror of https://github.com/MariaDB/server.git, synced 2025-08-01 03:47:19 +03:00
Merge remote-tracking branch 'origin/10.2' into bb-10.2-ext
@@ -727,3 +727,76 @@ A COUNT(*)
 DROP VIEW v1;
 DROP TABLE t1;
 End of 10.1 tests
+#
+# MDEV-14093: GROUP BY with HAVING over function + ORDER BY
+#
+CREATE TABLE _authors (
+id MEDIUMINT(8) UNSIGNED AUTO_INCREMENT,
+name VARCHAR(100),
+some_field MEDIUMINT(8) UNSIGNED,
+PRIMARY KEY (id),
+index(some_field)
+);
+CREATE TABLE _books (
+id MEDIUMINT(8) UNSIGNED AUTO_INCREMENT,
+title VARCHAR(100),
+PRIMARY KEY (id)
+);
+CREATE TABLE _books2authors (
+author_id MEDIUMINT(8) DEFAULT 0,
+book_id MEDIUMINT(8) DEFAULT 0,
+index(author_id),
+index(book_id)
+);
+INSERT INTO _authors (name, some_field) VALUES
+('author1', 1),('author2', 2),('author3', 3);
+INSERT INTO _books (title) VALUES
+('book1'),('book2'),('book3');
+INSERT INTO _books2authors (author_id, book_id) VALUES
+(2,1),(3,2),(3,3);
+SELECT A.id,
+GROUP_CONCAT(B.title ORDER BY B.title DESC SEPARATOR ',') AS books,
+some_field-1 AS having_field
+FROM _authors A
+LEFT JOIN _books2authors B2A FORCE INDEX(author_id)
+ON B2A.author_id = A.id
+LEFT JOIN
+_books B ON B.id = B2A.book_id
+GROUP BY A.id
+HAVING having_field < 1
+ORDER BY having_field ASC;
+id	books	having_field
+1	NULL	0
+DROP TABLE _authors, _books, _books2authors;
+#
+# Bug#17055185: WRONG RESULTS WHEN RUNNING A SELECT THAT INCLUDE
+# A HAVING BASED ON A FUNCTION.
+#
+CREATE TABLE series (
+val INT(10) UNSIGNED NOT NULL
+);
+INSERT INTO series VALUES(1);
+CREATE FUNCTION next_seq_value() RETURNS INT
+BEGIN
+DECLARE next_val INT;
+SELECT val INTO next_val FROM series;
+UPDATE series SET val=mod(val + 1, 2);
+RETURN next_val;
+END;
+|
+CREATE TABLE t1 (t INT, u INT, KEY(t));
+INSERT INTO t1 VALUES(10, 10), (11, 11), (12, 12), (12, 13),(14, 15), (15, 16),
+(16, 17), (17, 17);
+ANALYZE TABLE t1;
+Table	Op	Msg_type	Msg_text
+test.t1	analyze	status	OK
+SELECT t, next_seq_value() r FROM t1 FORCE INDEX(t)
+GROUP BY t HAVING r = 1 ORDER BY t1.u;
+t	r
+10	1
+12	1
+15	1
+17	1
+DROP TABLE t1;
+DROP FUNCTION next_seq_value;
+DROP TABLE series;
@@ -758,3 +758,83 @@ DROP VIEW v1;
 DROP TABLE t1;

 --echo End of 10.1 tests
+
+--echo #
+--echo # MDEV-14093: GROUP BY with HAVING over function + ORDER BY
+--echo #
+
+CREATE TABLE _authors (
+id MEDIUMINT(8) UNSIGNED AUTO_INCREMENT,
+name VARCHAR(100),
+some_field MEDIUMINT(8) UNSIGNED,
+PRIMARY KEY (id),
+index(some_field)
+);
+
+CREATE TABLE _books (
+id MEDIUMINT(8) UNSIGNED AUTO_INCREMENT,
+title VARCHAR(100),
+PRIMARY KEY (id)
+);
+CREATE TABLE _books2authors (
+author_id MEDIUMINT(8) DEFAULT 0,
+book_id MEDIUMINT(8) DEFAULT 0,
+index(author_id),
+index(book_id)
+);
+
+INSERT INTO _authors (name, some_field) VALUES
+('author1', 1),('author2', 2),('author3', 3);
+
+INSERT INTO _books (title) VALUES
+('book1'),('book2'),('book3');
+
+INSERT INTO _books2authors (author_id, book_id) VALUES
+(2,1),(3,2),(3,3);
+
+SELECT A.id,
+GROUP_CONCAT(B.title ORDER BY B.title DESC SEPARATOR ',') AS books,
+some_field-1 AS having_field
+FROM _authors A
+LEFT JOIN _books2authors B2A FORCE INDEX(author_id)
+ON B2A.author_id = A.id
+LEFT JOIN
+_books B ON B.id = B2A.book_id
+GROUP BY A.id
+HAVING having_field < 1
+ORDER BY having_field ASC;
+
+DROP TABLE _authors, _books, _books2authors;
+
+--echo #
+--echo # Bug#17055185: WRONG RESULTS WHEN RUNNING A SELECT THAT INCLUDE
+--echo # A HAVING BASED ON A FUNCTION.
+--echo #
+
+# Generate series 1, 0, 1, 0....
+CREATE TABLE series (
+val INT(10) UNSIGNED NOT NULL
+);
+INSERT INTO series VALUES(1);
+
+DELIMITER |;
+CREATE FUNCTION next_seq_value() RETURNS INT
+BEGIN
+DECLARE next_val INT;
+SELECT val INTO next_val FROM series;
+UPDATE series SET val=mod(val + 1, 2);
+RETURN next_val;
+END;
+|
+DELIMITER ;|
+
+CREATE TABLE t1 (t INT, u INT, KEY(t));
+INSERT INTO t1 VALUES(10, 10), (11, 11), (12, 12), (12, 13),(14, 15), (15, 16),
+(16, 17), (17, 17);
+ANALYZE TABLE t1;
+SELECT t, next_seq_value() r FROM t1 FORCE INDEX(t)
+GROUP BY t HAVING r = 1 ORDER BY t1.u;
+
+DROP TABLE t1;
+DROP FUNCTION next_seq_value;
+DROP TABLE series;
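As a side note, the Bug#17055185 half of the new coverage can be run outside mysqltest with plain client syntax. The sketch below is condensed from the test above (same statements, mysqltest plumbing replaced by client DELIMITER handling); the expected r = 1 groups are taken from the result hunk, and it assumes the server allows creating the function (binary logging disabled or log_bin_trust_function_creators=ON, as in the test environment).

-- Sketch only: condensed from the test case above.
CREATE TABLE series (val INT(10) UNSIGNED NOT NULL);
INSERT INTO series VALUES (1);

DELIMITER //
-- next_seq_value() returns 1, 0, 1, 0, ... so HAVING r = 1 keeps every other group.
CREATE FUNCTION next_seq_value() RETURNS INT
BEGIN
  DECLARE next_val INT;
  SELECT val INTO next_val FROM series;
  UPDATE series SET val = mod(val + 1, 2);
  RETURN next_val;
END//
DELIMITER ;

CREATE TABLE t1 (t INT, u INT, KEY(t));
INSERT INTO t1 VALUES (10,10),(11,11),(12,12),(12,13),(14,15),(15,16),(16,17),(17,17);

-- Per the result hunk above this must return t = 10, 12, 15, 17, each with r = 1.
SELECT t, next_seq_value() r FROM t1 FORCE INDEX(t)
GROUP BY t HAVING r = 1 ORDER BY t1.u;

DROP TABLE t1;
DROP FUNCTION next_seq_value;
DROP TABLE series;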
@@ -135,7 +135,7 @@ String *Item_func_geometry_from_json::val_str(String *str)
     {
       String *sv= args[1]->val_str(&tmp_js);
       my_error(ER_WRONG_VALUE_FOR_TYPE, MYF(0),
-               "option", sv->c_ptr(), "ST_GeometryFromJSON");
+               "option", sv->c_ptr_safe(), "ST_GeometryFromJSON");
       null_value= 1;
       return 0;
     }
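This is the error path taken when the optional second argument of the GeoJSON import function is rejected; c_ptr_safe() reallocates when the String has no spare byte for a terminating NUL, so the value echoed in the error message is always safely null-terminated, which plain c_ptr() does not guarantee. A hedged repro sketch follows; the SQL-level name ST_GeomFromGeoJSON and the assumption that only option values 1-4 are accepted are not shown in this hunk and are taken on faith.

-- Assumed repro, not part of the patch: an out-of-range option value should
-- raise ER_WRONG_VALUE_FOR_TYPE for "option" in ST_GeometryFromJSON.
SELECT ST_AsText(ST_GeomFromGeoJSON('{"type": "Point", "coordinates": [1, 1]}', 5));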
@@ -2260,6 +2260,76 @@ derived_exit:
   DBUG_RETURN(0);
 }

+/**
+  Add having condition as a where clause condition of the given temp table.
+
+  @param tab Table to which having condition is added.
+
+  @returns false if success, true if error.
+*/
+
+bool JOIN::add_having_as_table_cond(JOIN_TAB *tab)
+{
+  tmp_having->update_used_tables();
+  table_map used_tables= tab->table->map | OUTER_REF_TABLE_BIT;
+
+  /* If tmp table is not used then consider conditions of const table also */
+  if (!need_tmp)
+    used_tables|= const_table_map;
+
+  DBUG_ENTER("JOIN::add_having_as_table_cond");
+
+  Item* sort_table_cond= make_cond_for_table(thd, tmp_having, used_tables,
+                                             (table_map) 0, false,
+                                             false, false);
+  if (sort_table_cond)
+  {
+    if (!tab->select)
+    {
+      if (!(tab->select= new SQL_SELECT))
+        DBUG_RETURN(true);
+      tab->select->head= tab->table;
+    }
+    if (!tab->select->cond)
+      tab->select->cond= sort_table_cond;
+    else
+    {
+      if (!(tab->select->cond=
+              new (thd->mem_root) Item_cond_and(thd,
+                                                tab->select->cond,
+                                                sort_table_cond)))
+        DBUG_RETURN(true);
+    }
+    if (tab->pre_idx_push_select_cond)
+    {
+      if (sort_table_cond->type() == Item::COND_ITEM)
+        sort_table_cond= sort_table_cond->copy_andor_structure(thd);
+      if (!(tab->pre_idx_push_select_cond=
+              new (thd->mem_root) Item_cond_and(thd,
+                                                tab->pre_idx_push_select_cond,
+                                                sort_table_cond)))
+        DBUG_RETURN(true);
+    }
+    if (tab->select->cond && !tab->select->cond->fixed)
+      tab->select->cond->fix_fields(thd, 0);
+    if (tab->pre_idx_push_select_cond && !tab->pre_idx_push_select_cond->fixed)
+      tab->pre_idx_push_select_cond->fix_fields(thd, 0);
+    tab->select->pre_idx_push_select_cond= tab->pre_idx_push_select_cond;
+    tab->set_select_cond(tab->select->cond, __LINE__);
+    tab->select_cond->top_level_item();
+    DBUG_EXECUTE("where",print_where(tab->select->cond,
+                                     "select and having",
+                                     QT_ORDINARY););
+
+    having= make_cond_for_table(thd, tmp_having, ~ (table_map) 0,
+                                ~used_tables, false, false, false);
+    DBUG_EXECUTE("where",
+                 print_where(having, "having after sort", QT_ORDINARY););
+  }
+
+  DBUG_RETURN(false);
+}
+

 /**
   Set info for aggregation tables
@@ -2289,6 +2359,7 @@ bool JOIN::make_aggr_tables_info()
   TABLE *exec_tmp_table= NULL;
   bool distinct= false;
   bool keep_row_order= false;
+  bool is_having_added_as_table_cond= false;
   DBUG_ENTER("JOIN::make_aggr_tables_info");

   const bool has_group_by= this->group;
@@ -2475,29 +2546,6 @@ bool JOIN::make_aggr_tables_info()
     if (exec_tmp_table->distinct)
       optimize_distinct();

-   /*
-     We don't have to store rows in temp table that doesn't match HAVING if:
-     - we are sorting the table and writing complete group rows to the
-       temp table.
-     - We are using DISTINCT without resolving the distinct as a GROUP BY
-       on all columns.
-
-     If having is not handled here, it will be checked before the row
-     is sent to the client.
-
-     In the case of window functions however, we *must* make sure to not
-     store any rows which don't match HAVING within the temp table,
-     as rows will end up being used during their computation.
-   */
-   if (having &&
-       (sort_and_group || (exec_tmp_table->distinct && !group_list) ||
-        select_lex->have_window_funcs()))
-   {
-     /* Attach HAVING to tmp table's condition */
-     curr_tab->having= having;
-     having= NULL; /* Already done */
-   }
-
     /* Change sum_fields reference to calculated fields in tmp_table */
     items1= ref_ptr_array_slice(2);
     if ((sort_and_group || curr_tab->table->group ||
@@ -2525,6 +2573,38 @@ bool JOIN::make_aggr_tables_info()
     curr_tab->fields= &tmp_fields_list1;
     set_postjoin_aggr_write_func(curr_tab);

+    /*
+      If having is not handled here, it will be checked before the row is sent
+      to the client.
+    */
+    if (tmp_having &&
+        (sort_and_group || (exec_tmp_table->distinct && !group_list) ||
+         select_lex->have_window_funcs()))
+    {
+      /*
+        If there is no select distinct and there are no window functions
+        then move the having to table conds of tmp table.
+        NOTE : We cannot apply having after distinct or window functions
+        If columns of having are not part of select distinct,
+        then distinct may remove rows which can satisfy having.
+        In the case of window functions we *must* make sure to not
+        store any rows which don't match HAVING within the temp table,
+        as rows will end up being used during their computation.
+      */
+      if (!select_distinct && !select_lex->have_window_funcs() &&
+          add_having_as_table_cond(curr_tab))
+        DBUG_RETURN(true);
+      is_having_added_as_table_cond= tmp_having != having;
+
+      /*
+        Having condition which we are not able to add as tmp table conds are
+        kept as before. And, this will be applied before storing the rows in
+        tmp table.
+      */
+      curr_tab->having= having;
+      having= NULL; // Already done
+    }
+
     tmp_table_param.func_count= 0;
     tmp_table_param.field_count+= tmp_table_param.func_count;
     if (sort_and_group || curr_tab->table->group)
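The guard in the new block only moves the HAVING into the temp-table conditions when there is neither SELECT DISTINCT nor a window function, for exactly the reasons spelled out in the comment. A hedged illustration of the window-function case follows (hypothetical table and data, not part of the patch):

-- Hypothetical illustration: t(a INT) with duplicate and unique values of a.
CREATE TABLE t (a INT);
INSERT INTO t VALUES (1), (1), (2), (3), (3);

-- Window functions are computed over the rows stored in the temp table, so a
-- group rejected by HAVING (a=2 with cnt=1) must never be written there;
-- otherwise the surviving groups a=1 and a=3 would not get row numbers 1 and 2.
SELECT a, COUNT(*) AS cnt, ROW_NUMBER() OVER (ORDER BY a) AS rn
FROM t
GROUP BY a
HAVING cnt > 1;

DROP TABLE t;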
@@ -2714,60 +2794,11 @@ bool JOIN::make_aggr_tables_info()
       DBUG_PRINT("info",("Sorting for send_result_set_metadata"));
       THD_STAGE_INFO(thd, stage_sorting_result);
       /* If we have already done the group, add HAVING to sorted table */
-      if (tmp_having && !group_list && !sort_and_group)
+      if (tmp_having && !is_having_added_as_table_cond &&
+          !group_list && !sort_and_group)
       {
-        // Some tables may have been const
-        tmp_having->update_used_tables();
-        table_map used_tables= (const_table_map | curr_tab->table->map);
-
-        Item* sort_table_cond= make_cond_for_table(thd, tmp_having, used_tables,
-                                                   (table_map) 0, false,
-                                                   false, false);
-        if (sort_table_cond)
-        {
-          if (!curr_tab->select)
-          {
-            if (!(curr_tab->select= new SQL_SELECT))
-              DBUG_RETURN(true);
-            curr_tab->select->head= curr_tab->table;
-          }
-          if (!curr_tab->select->cond)
-            curr_tab->select->cond= sort_table_cond;
-          else
-          {
-            if (!(curr_tab->select->cond=
-                  new (thd->mem_root) Item_cond_and(thd, curr_tab->select->cond,
-                                                    sort_table_cond)))
-              DBUG_RETURN(true);
-          }
-          if (curr_tab->pre_idx_push_select_cond)
-          {
-            if (sort_table_cond->type() == Item::COND_ITEM)
-              sort_table_cond= sort_table_cond->copy_andor_structure(thd);
-            if (!(curr_tab->pre_idx_push_select_cond=
-                  new (thd->mem_root) Item_cond_and(thd,
-                                                    curr_tab->pre_idx_push_select_cond,
-                                                    sort_table_cond)))
-              DBUG_RETURN(true);
-          }
-          if (curr_tab->select->cond && !curr_tab->select->cond->fixed)
-            curr_tab->select->cond->fix_fields(thd, 0);
-          if (curr_tab->pre_idx_push_select_cond &&
-              !curr_tab->pre_idx_push_select_cond->fixed)
-            curr_tab->pre_idx_push_select_cond->fix_fields(thd, 0);
-          curr_tab->select->pre_idx_push_select_cond=
-            curr_tab->pre_idx_push_select_cond;
-          curr_tab->set_select_cond(curr_tab->select->cond, __LINE__);
-          curr_tab->select_cond->top_level_item();
-          DBUG_EXECUTE("where",print_where(curr_tab->select->cond,
-                                           "select and having",
-                                           QT_ORDINARY););
-
-          having= make_cond_for_table(thd, tmp_having, ~ (table_map) 0,
-                                      ~used_tables, false, false, false);
-          DBUG_EXECUTE("where",
-                       print_where(having, "having after sort", QT_ORDINARY););
-        }
+        if (add_having_as_table_cond(curr_tab))
+          DBUG_RETURN(true);
       }

       if (group)
@@ -1709,6 +1709,7 @@ private:
   void optimize_distinct();

   void cleanup_item_list(List<Item> &items) const;
+  bool add_having_as_table_cond(JOIN_TAB *tab);
   bool make_aggr_tables_info();

 };
@@ -4523,7 +4523,7 @@ prepare_inplace_alter_table_dict(
 		if (alt_opt.encryption != opt.encryption
 		    || alt_opt.encryption_key_id
 		    != opt.encryption_key_id) {
-			key_id = alt_opt.encryption_key_id;
+			key_id = uint32_t(alt_opt.encryption_key_id);
 			mode = fil_encryption_t(alt_opt.encryption);
 		}
 	}
@@ -5382,13 +5382,21 @@ fallback:
 	} while (err == EINTR
 		 && srv_shutdown_state == SRV_SHUTDOWN_NONE);

-	if (err) {
+	switch (err) {
+	case 0:
+		return true;
+	default:
 		ib::error() << "preallocating "
 			<< size << " bytes for file " << name
 			<< " failed with error " << err;
-	}
+		/* fall through */
+	case EINTR:
 		errno = err;
-	return(!err);
+		return false;
+	case EINVAL:
+		/* fall back to the code below */
+		break;
+	}
 # endif /* HAVE_POSIX_ALLOCATE */
 #endif /* _WIN32*/

@@ -5410,14 +5418,9 @@ fallback:
 	memset(buf, 0, buf_size);

 	os_offset_t current_size = os_file_get_size(file);
-	bool write_progress_info =
-		(size - current_size >= (os_offset_t) 100 << 20);

-	if (write_progress_info) {
-		ib::info() << "Progress in MB:";
-	}
-
-	while (current_size < size) {
+	while (current_size < size
+	       && srv_shutdown_state == SRV_SHUTDOWN_NONE) {
 		ulint n_bytes;

 		if (size - current_size < (os_offset_t) buf_size) {
@@ -5433,32 +5436,15 @@ fallback:
 			request, name, file, buf, current_size, n_bytes);

 		if (err != DB_SUCCESS) {
-			ut_free(buf2);
-			return(false);
+			break;
 		}

-		/* Print about progress for each 100 MB written */
-		if (write_progress_info &&
-		    ((current_size + n_bytes) / (100 << 20)
-		     != current_size / (100 << 20))) {
-
-			fprintf(stderr, " %lu00",
-				(ulong) ((current_size + n_bytes)
-					 / (100 << 20)));
-		}
-
 		current_size += n_bytes;
 	}

-	if (write_progress_info) {
-
-		fprintf(stderr, "\n");
-	}
-
 	ut_free(buf2);

-	return(os_file_flush(file));
+	return(current_size >= size && os_file_flush(file));
 }

 /** Truncates a file to a specified size in bytes.