Mirror of https://github.com/MariaDB/server.git, synced 2025-07-29 05:21:33 +03:00
MDEV-26590: Stack smashing/buffer overflow in Histogram_json_hb::parse
Provide buffer of sufficient size.
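For context (not part of the commit message): the parser built each bucket's key image in a fixed stack array, uchar buf[MAX_KEY_LENGTH], while the number of bytes written is driven by field->key_length(), which for a long VARCHAR such as the VARCHAR(3176) in the regression test below can exceed that cap. Here is a minimal standalone sketch of the bug pattern and the shape of the fix; the constant and the get_key_image stand-in are illustrative, not the server's actual API:

#include <cstring>
#include <vector>

// Illustrative cap, standing in for the server's MAX_KEY_LENGTH.
constexpr size_t MAX_KEY_LENGTH= 3072;

// Stand-in for Field::get_key_image(): copies up to `len` bytes into buf.
static size_t get_key_image(unsigned char *buf, size_t len,
                            const char *src, size_t src_len)
{
  size_t n= src_len < len ? src_len : len;
  memcpy(buf, src, n);
  return n;
}

// Old shape: fixed stack buffer. If key_length > MAX_KEY_LENGTH
// (e.g. for a VARCHAR(3176) column), the copy runs past the array:
// the "stack smashing" in the bug title.
void parse_bucket_old(const char *val, size_t val_len, size_t key_length)
{
  unsigned char buf[MAX_KEY_LENGTH];
  get_key_image(buf, key_length, val, val_len);  // out-of-bounds write possible
}

// Fixed shape: size the buffer from the field itself
// (pack_length() in the real code), so the image always fits.
void parse_bucket_fixed(const char *val, size_t val_len,
                        size_t key_length, size_t pack_length)
{
  std::vector<unsigned char> buf(pack_length);
  get_key_image(buf.data(), key_length, val, val_len);
}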
mysql-test/main/statistics_json.result
@@ -7445,3 +7445,18 @@ histogram
 ]
 }
 drop table t10;
+#
+# MDEV-26590: Stack smashing/buffer overflow in Histogram_json_hb::parse upon UPDATE on table with long VARCHAR
+#
+CREATE TABLE t1 (b INT, a VARCHAR(3176));
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+SET histogram_type= JSON_HB;
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+Table	Op	Msg_type	Msg_text
+test.t1	analyze	status	Engine-independent statistics collected
+test.t1	analyze	status	OK
+SELECT * FROM t1;
+b	a
+1	foo
+2	bar
+drop table t1;
mysql-test/main/statistics_json.test
@@ -170,5 +170,16 @@ set histogram_size=10, histogram_type='json_hb';
 analyze table t10 persistent for all;
 select histogram
 from mysql.column_stats where table_name='t10' and db_name=database();
 
 drop table t10;
+
+--echo #
+--echo # MDEV-26590: Stack smashing/buffer overflow in Histogram_json_hb::parse upon UPDATE on table with long VARCHAR
+--echo #
+
+CREATE TABLE t1 (b INT, a VARCHAR(3176));
+INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
+SET histogram_type= JSON_HB;
+ANALYZE TABLE t1 PERSISTENT FOR ALL;
+SELECT * FROM t1;
+drop table t1;
sql/opt_histogram_json.cc
@@ -272,6 +272,7 @@ bool Histogram_json_hb::parse(MEM_ROOT *mem_root, Field *field,
   int obj1_len;
   double cumulative_size= 0.0;
   size_t end_member_index= (size_t)-1;
+  StringBuffer<128> value_buf;
 
   if (JSV_OBJECT != json_type(hist_data, hist_data + hist_data_len,
                               &obj1, &obj1_len))
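The new value_buf is MariaDB's StringBuffer<N>, a String with N bytes of inline storage, so alloc() only touches the heap when the requested size exceeds the inline capacity; small key images stay about as cheap as the old stack array. A simplified sketch of that small-buffer idea, assuming nothing about the real String API beyond alloc()/ptr() (contents are not preserved on growth in this sketch):

#include <cstddef>
#include <cstdlib>

// Illustrative small-buffer class in the spirit of StringBuffer<N>;
// names and details are simplified, not the real MariaDB String API.
template <size_t N>
class SmallBuf
{
  char inline_buf[N];        // lives with the object, typically on the stack
  char *ptr_= inline_buf;
  size_t cap_= N;
public:
  // Grow to at least `n` bytes; heap-allocate only when the inline
  // storage is too small. Returns true on allocation failure,
  // mirroring the server's error convention.
  bool alloc(size_t n)
  {
    if (n <= cap_)
      return false;                 // already big enough
    char *p= (char *) malloc(n);
    if (!p)
      return true;                  // out of memory
    if (ptr_ != inline_buf)
      free(ptr_);
    ptr_= p;
    cap_= n;
    return false;
  }
  char *ptr() { return ptr_; }
  ~SmallBuf() { if (ptr_ != inline_buf) free(ptr_); }
};

The two hunks that follow continue in sql/opt_histogram_json.cc and replace both users of the old stack array with this buffer.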
@@ -370,13 +371,12 @@ bool Histogram_json_hb::parse(MEM_ROOT *mem_root, Field *field,
       goto error;
     }
 
-
-    uchar buf[MAX_KEY_LENGTH];
     uint len_to_copy= field->key_length();
     field->store_text(val, val_len, &my_charset_bin);
-    uint bytes= field->get_key_image(buf, len_to_copy, Field::itRAW);
-
-    buckets.push_back({std::string((char*)buf, bytes), cumulative_size,
+    value_buf.alloc(field->pack_length());
+    uint bytes= field->get_key_image((uchar*)value_buf.ptr(), len_to_copy,
+                                     Field::itRAW);
+    buckets.push_back({std::string(value_buf.ptr(), bytes), cumulative_size,
                        ndv_ll});
 
     // Read the "end" field
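Why pack_length() is the right size (my reading of the fix, not spelled out in the commit): key_length() reflects the declared column width and is what bounds how much get_key_image() may write, while MAX_KEY_LENGTH is a server-wide cap on index key size that a histogram column need not respect; pack_length() is the field's full storage size, so a buffer of that many bytes always fits the image. A toy check with assumed figures for a latin1 VARCHAR(3176); the exact values come from Field_varstring in the server, not from this sketch:

#include <cassert>
#include <cstdio>

int main()
{
  const unsigned max_key_length= 3072;  // size of the old stack buffer (assumed)
  const unsigned key_length    = 3176;  // what the old code asked to copy (assumed)
  const unsigned pack_length   = 3178;  // column data + 2 length bytes (assumed)

  assert(key_length > max_key_length);  // old buffer: out-of-bounds write
  assert(pack_length >= key_length);    // new buffer: always fits
  printf("old buffer short by %u bytes\n", key_length - max_key_length);
  return 0;
}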
@@ -393,8 +393,10 @@ bool Histogram_json_hb::parse(MEM_ROOT *mem_root, Field *field,
   if (ret != JSV_NOTHING)
   {
     field->store_text(end_val, end_val_len, &my_charset_bin);
-    uint bytes= field->get_key_image(buf, len_to_copy, Field::itRAW);
-    last_bucket_end_endp.assign((char*)buf, bytes);
+    value_buf.alloc(field->pack_length());
+    uint bytes= field->get_key_image((uchar*)value_buf.ptr(), len_to_copy,
+                                     Field::itRAW);
+    last_bucket_end_endp.assign(value_buf.ptr(), bytes);
     if (end_member_index == (size_t)-1)
       end_member_index= buckets.size();
   }