
Merge branch bb-10.2-release into bb-10.3-release

Nikita Malyavin 2021-05-04 14:49:31 +03:00
commit a8a925dd22
88 changed files with 1696 additions and 469 deletions

View File

@ -14,7 +14,8 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.7)
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12)
PROJECT(MySQL)
IF(POLICY CMP0022)
CMAKE_POLICY(SET CMP0022 NEW)
@ -41,8 +42,16 @@ IF(NOT DEFINED MANUFACTURER)
MARK_AS_ADVANCED(MANUFACTURER)
ENDIF()
SET(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING
"Choose the type of build, options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel")
IF(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
# Setting build type to RelWithDebInfo as none was specified.
SET(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING
"Choose the type of build, options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel"
FORCE)
# Set the possible values of build type for cmake-gui
SET_PROPERTY(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS
"None" "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
ENDIF()
# MAX_INDEXES - Set the maximum number of indexes per table, default 64
IF (NOT MAX_INDEXES)
@ -71,18 +80,8 @@ IF(UNIX AND NOT APPLE)
MARK_AS_ADVANCED(WITH_PIC)
ENDIF()
# Optionally set project name, e.g.
# foo.xcodeproj (mac) or foo.sln (windows)
# This is used by TokuDB only
SET(MYSQL_PROJECT_NAME_DOCSTRING "MySQL project name")
IF(DEFINED MYSQL_PROJECT_NAME)
SET(MYSQL_PROJECT_NAME ${MYSQL_PROJECT_NAME} CACHE STRING
${MYSQL_PROJECT_NAME_DOCSTRING} FORCE)
ELSE()
SET(MYSQL_PROJECT_NAME "MySQL" CACHE STRING
${MYSQL_PROJECT_NAME_DOCSTRING} FORCE)
MARK_AS_ADVANCED(MYSQL_PROJECT_NAME)
ENDIF()
PROJECT(${MYSQL_PROJECT_NAME})
SET(CPACK_PACKAGE_NAME "MariaDB")
SET(CPACK_PACKAGE_DESCRIPTION_SUMMARY "MariaDB: a very fast and robust SQL database server")

@ -1 +1 @@
Subproject commit d19c7c69269fdf4e2af8943dd86c12e4e1664afd
Subproject commit 180c543704d627a50a52aaf60e24ca14e0ec4686

View File

@ -0,0 +1,15 @@
CREATE TABLE t1 (c INT);
CREATE TABLE t2 (c INT);
LOCK TABLES t1 WRITE, t2 READ;
CREATE TABLE IF NOT EXISTS t1 LIKE t2;
Warnings:
Note 1050 Table 't1' already exists
UNLOCK TABLES;
LOCK TABLES t1 READ , t2 READ;
CREATE TABLE IF NOT EXISTS t1 LIKE t2;
ERROR HY000: Table 't1' was locked with a READ lock and can't be updated
UNLOCK TABLES;
CREATE TABLE IF NOT EXISTS t1 LIKE t2;
Warnings:
Note 1050 Table 't1' already exists
DROP TABLES t1,t2;

View File

@ -0,0 +1,15 @@
CREATE TABLE t1 (c INT);
CREATE TABLE t2 (c INT);
LOCK TABLES t1 WRITE, t2 READ;
CREATE TABLE IF NOT EXISTS t1 LIKE t2;
UNLOCK TABLES;
LOCK TABLES t1 READ , t2 READ;
--error ER_TABLE_NOT_LOCKED_FOR_WRITE
CREATE TABLE IF NOT EXISTS t1 LIKE t2;
UNLOCK TABLES;
CREATE TABLE IF NOT EXISTS t1 LIKE t2;
DROP TABLES t1,t2;

View File

@ -30,3 +30,38 @@ disconnect con2;
USE test;
DROP PROCEDURE p_install;
DROP PROCEDURE p_show_vars;
#
# Bug#29363867: LOST CONNECTION TO MYSQL SERVER DURING QUERY
#
## prepared SET with a plugin variable prevents uninstall
install plugin query_response_time soname 'query_response_time';
prepare s from 'set global query_response_time_range_base=16';
select plugin_status from information_schema.plugins where plugin_name='query_response_time';
plugin_status
ACTIVE
uninstall plugin query_response_time;
Warnings:
Warning 1620 Plugin is busy and will be uninstalled on shutdown
execute s;
execute s;
select plugin_status from information_schema.plugins where plugin_name='query_response_time';
plugin_status
DELETED
deallocate prepare s;
select plugin_status from information_schema.plugins where plugin_name='query_response_time';
plugin_status
## prepared SET mentioning a plugin otherwise does not prevent uninstall
install plugin archive soname 'ha_archive';
create table t1 (a int) engine=archive;
insert t1 values (1),(2),(3);
prepare s from 'set session auto_increment_increment=(select count(*) from t1)';
flush tables;
select plugin_status from information_schema.plugins where plugin_name='archive';
plugin_status
ACTIVE
uninstall plugin archive;
select plugin_status from information_schema.plugins where plugin_name='archive';
plugin_status
execute s;
ERROR 42000: Unknown storage engine 'ARCHIVE'
drop table t1;

View File

@ -1,3 +1,10 @@
if (!$QUERY_RESPONSE_TIME_SO) {
skip Needs query_response_time loadable plugin;
}
if (!$HA_ARCHIVE_SO) {
skip Needs Archive loadable plugin;
}
--echo #
--echo # MDEV-5345 - Deadlock between mysql_change_user(), SHOW VARIABLES and
--echo # INSTALL PLUGIN
@ -54,3 +61,31 @@ disconnect con2;
USE test;
DROP PROCEDURE p_install;
DROP PROCEDURE p_show_vars;
--echo #
--echo # Bug#29363867: LOST CONNECTION TO MYSQL SERVER DURING QUERY
--echo #
--echo ## prepared SET with a plugin variable prevents uninstall
install plugin query_response_time soname 'query_response_time';
prepare s from 'set global query_response_time_range_base=16';
select plugin_status from information_schema.plugins where plugin_name='query_response_time';
uninstall plugin query_response_time;
execute s;
execute s;
select plugin_status from information_schema.plugins where plugin_name='query_response_time';
deallocate prepare s;
select plugin_status from information_schema.plugins where plugin_name='query_response_time';
--echo ## prepared SET mentioning a plugin otherwise does not prevent uninstall
install plugin archive soname 'ha_archive';
create table t1 (a int) engine=archive;
insert t1 values (1),(2),(3);
prepare s from 'set session auto_increment_increment=(select count(*) from t1)';
flush tables;
select plugin_status from information_schema.plugins where plugin_name='archive';
uninstall plugin archive;
select plugin_status from information_schema.plugins where plugin_name='archive';
--error ER_UNKNOWN_STORAGE_ENGINE
execute s;
drop table t1;

View File

@ -0,0 +1 @@
--enable-plugin-innodb-lock-waits --enable-plugin-innodb-trx

View File

@ -861,7 +861,14 @@ select * from t1 where pk between 10 and 20 for update;
# run SHOW EXPLAIN on a frozen thread
connection default;
let $save_wait_condition= $wait_condition;
let $wait_condition= select State='Sending data' from information_schema.processlist where id=$thr2;
let $wait_condition=
select 1
from information_schema.INNODB_LOCK_WAITS
where
requesting_trx_id=(select trx_id
from information_schema.INNODB_TRX
where trx_mysql_thread_id=$thr2);
let $thr_default=`select connection_id()`;
--source include/wait_condition.inc
--echo # do: send_eval show explain for thr2;

View File

@ -8487,6 +8487,21 @@ ERROR HY000: View 'test.v1' references invalid table(s) or column(s) or function
DROP PROCEDURE p1;
DROP VIEW v1;
DROP TABLE t1;
#
# BUG#30366310: USING A FUNCTION TO ASSIGN DEFAULT VALUES TO
# 2 OR MORE VARIABLES CRASHES SERVER
#
create function f1() returns bigint return now()-1|
create procedure p1()
begin
declare b, c bigint default f1();
select b-c;
end|
call p1()|
b-c
0
drop procedure p1|
drop function f1|
#End of 10.2 tests
#
# MDEV-12007 Allow ROW variables as a cursor FETCH target

View File

@ -10026,6 +10026,25 @@ DROP PROCEDURE p1;
DROP VIEW v1;
DROP TABLE t1;
--echo #
--echo # BUG#30366310: USING A FUNCTION TO ASSIGN DEFAULT VALUES TO
--echo # 2 OR MORE VARIABLES CRASHES SERVER
--echo #
delimiter |;
create function f1() returns bigint return now()-1|
create procedure p1()
begin
declare b, c bigint default f1();
select b-c;
end|
call p1()|
drop procedure p1|
drop function f1|
delimiter ;|
--echo #End of 10.2 tests
--echo #

View File

@ -492,4 +492,12 @@ select * from mysql.plugin WHERE name='unexisting_udf';
name dl
DROP FUNCTION unexisting_udf;
ERROR 42000: FUNCTION test.unexisting_udf does not exist
#
# Bug #31674599: THE UDF_INIT() FUNCTION CAUSE SERVER CRASH
#
call mtr.add_suppression('Invalid row in mysql.func table');
insert mysql.func () values ();
delete from mysql.func where name = '';
#
# End of 10.2 tests
#

View File

@ -562,4 +562,14 @@ select * from mysql.plugin WHERE name='unexisting_udf';
--error ER_SP_DOES_NOT_EXIST
DROP FUNCTION unexisting_udf;
--echo #
--echo # Bug #31674599: THE UDF_INIT() FUNCTION CAUSE SERVER CRASH
--echo #
call mtr.add_suppression('Invalid row in mysql.func table');
insert mysql.func () values ();
source include/restart_mysqld.inc;
delete from mysql.func where name = '';
--echo #
--echo # End of 10.2 tests
--echo #

View File

@ -7125,7 +7125,7 @@ CALL sp1();
x y z
000 000 000
Warnings:
Warning 1264 Out of range value for column 'z' at row 1
Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@ -7163,7 +7163,7 @@ CALL sp1();
x y z
00000 00000 00000
Warnings:
Warning 1264 Out of range value for column 'z' at row 1
Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@ -7201,7 +7201,7 @@ CALL sp1();
x y z
00000000 00000000 00000000
Warnings:
Warning 1264 Out of range value for column 'z' at row 1
Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@ -7239,7 +7239,7 @@ CALL sp1();
x y z
0000000000 0000000000 0000000000
Warnings:
Warning 1264 Out of range value for column 'z' at row 1
Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@ -7277,7 +7277,7 @@ CALL sp1();
x y z
00000000000000000000 00000000000000000000 00000000000000000000
Warnings:
Warning 1264 Out of range value for column 'z' at row 1
Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@ -7297,7 +7297,7 @@ CALL sp1();
x y z
-9999999999 -9999999999 -9999999999
Warnings:
Warning 1264 Out of range value for column 'z' at row 1
Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@ -7308,7 +7308,7 @@ CALL sp1();
x y z
0 0 0
Warnings:
Note 1265 Data truncated for column 'z' at row 1
Note 1265 Data truncated for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@ -7319,7 +7319,7 @@ CALL sp1();
x y z
0000000000 0000000000 0000000000
Warnings:
Warning 1264 Out of range value for column 'z' at row 1
Warning 1264 Out of range value for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@ -7330,7 +7330,7 @@ CALL sp1();
x y z
0000000000 0000000000 0000000000
Warnings:
Note 1265 Data truncated for column 'z' at row 1
Note 1265 Data truncated for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@ -7341,7 +7341,7 @@ CALL sp1();
x y z
0 0 0
Warnings:
Note 1265 Data truncated for column 'z' at row 1
Note 1265 Data truncated for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@ -7352,7 +7352,7 @@ CALL sp1();
x y z
0 0 0
Warnings:
Note 1265 Data truncated for column 'z' at row 1
Note 1265 Data truncated for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@ -7363,7 +7363,7 @@ CALL sp1();
x y z
0000000000 0000000000 0000000000
Warnings:
Note 1265 Data truncated for column 'z' at row 1
Note 1265 Data truncated for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN
@ -7374,7 +7374,7 @@ CALL sp1();
x y z
0000000000 0000000000 0000000000
Warnings:
Note 1265 Data truncated for column 'z' at row 1
Note 1265 Data truncated for column 'x' at row 1
DROP PROCEDURE IF EXISTS sp1;
CREATE PROCEDURE sp1( )
BEGIN

View File

@ -0,0 +1,22 @@
CREATE TABLE t1(id int not null primary key, b int) engine=InnoDB;
INSERT INTO t1 VALUES (0,0),(1,1),(2,2),(3,3);
BEGIN;
UPDATE t1 set b = 100 where id between 1 and 2;;
connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1b;
SET @save_dbug = @@SESSION.debug_dbug;
SET @@SESSION.innodb_lock_wait_timeout=2;
SET @@SESSION.debug_dbug = '+d,wsrep_instrument_BF_lock_wait';
UPDATE t1 set b = 200 WHERE id = 1;
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
SET @@SESSION.debug_dbug = @save_dbug;
connection node_1;
COMMIT;
SELECT * FROM t1;
id b
0 0
1 100
2 100
3 3
disconnect node_1b;
DROP TABLE t1;

View File

@ -0,0 +1,19 @@
SHOW VARIABLES LIKE '%password%';
Variable_name Value
old_passwords OFF
report_password
strict_password_validation ON
CREATE USER 'user123456'@'localhost';
GRANT SELECT, INSERT, UPDATE ON test.* TO 'user123456'@'localhost';
SET PASSWORD FOR 'user123456'@'localhost' = PASSWORD('A$10abcdDCBA123456%7');
SHOW GRANTS FOR 'user123456'@'localhost';
Grants for user123456@localhost
GRANT USAGE ON *.* TO `user123456`@`localhost` IDENTIFIED BY PASSWORD '*5846CF4D641598B360B3562E581586155C59F65A'
GRANT SELECT, INSERT, UPDATE ON `test`.* TO `user123456`@`localhost`
connection node_2;
SHOW GRANTS FOR 'user123456'@'localhost';
Grants for user123456@localhost
GRANT USAGE ON *.* TO `user123456`@`localhost` IDENTIFIED BY PASSWORD '*5846CF4D641598B360B3562E581586155C59F65A'
GRANT SELECT, INSERT, UPDATE ON `test`.* TO `user123456`@`localhost`
connection node_1;
DROP USER 'user123456'@'localhost';

View File

@ -1,6 +1,6 @@
SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 4
1
SELECT VARIABLE_VALUE AS EXPECT_4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
EXPECT_4
4
connection node_1;
CREATE TABLE t1 (f1 INTEGER);
INSERT INTO t1 VALUES (1);
@ -46,30 +46,30 @@ INSERT INTO t1 VALUES (33);
connection node_4;
INSERT INTO t1 VALUES (341);
connection node_1;
SELECT COUNT(*) = 19 FROM t1;
COUNT(*) = 19
1
SELECT COUNT(*) AS EXPECT_19 FROM t1;
EXPECT_19
19
connection node_2;
SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 4
1
SELECT COUNT(*) = 19 FROM t1;
COUNT(*) = 19
1
SELECT VARIABLE_VALUE AS EXPECT_4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
EXPECT_4
4
SELECT COUNT(*) AS EXPECT_19 FROM t1;
EXPECT_19
19
connection node_3;
SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 4
1
SELECT COUNT(*) = 19 FROM t1;
COUNT(*) = 19
1
SELECT VARIABLE_VALUE AS EXPECT_4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
EXPECT_4
4
SELECT COUNT(*) AS EXPECT_19 FROM t1;
EXPECT_19
19
connection node_4;
SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
VARIABLE_VALUE = 4
1
SELECT COUNT(*) = 19 FROM t1;
COUNT(*) = 19
1
SELECT VARIABLE_VALUE AS EXPECT_4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
EXPECT_4
4
SELECT COUNT(*) AS EXPECT_19 FROM t1;
EXPECT_19
19
connection node_1;
DROP TABLE t1;
CALL mtr.add_suppression("There are no nodes in the same segment that will ever be able to become donors, yet there is a suitable donor outside");

View File

@ -0,0 +1,25 @@
--source include/galera_cluster.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
CREATE TABLE t1(id int not null primary key, b int) engine=InnoDB;
INSERT INTO t1 VALUES (0,0),(1,1),(2,2),(3,3);
BEGIN;
--send UPDATE t1 set b = 100 where id between 1 and 2;
--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connection node_1b
SET @save_dbug = @@SESSION.debug_dbug;
SET @@SESSION.innodb_lock_wait_timeout=2;
SET @@SESSION.debug_dbug = '+d,wsrep_instrument_BF_lock_wait';
--error ER_LOCK_WAIT_TIMEOUT
UPDATE t1 set b = 200 WHERE id = 1;
SET @@SESSION.debug_dbug = @save_dbug;
--connection node_1
--reap
COMMIT;
SELECT * FROM t1;
--disconnect node_1b
DROP TABLE t1;

View File

@ -0,0 +1,14 @@
--source include/galera_cluster.inc
SHOW VARIABLES LIKE '%password%';
CREATE USER 'user123456'@'localhost';
GRANT SELECT, INSERT, UPDATE ON test.* TO 'user123456'@'localhost';
SET PASSWORD FOR 'user123456'@'localhost' = PASSWORD('A$10abcdDCBA123456%7');
SHOW GRANTS FOR 'user123456'@'localhost';
--connection node_2
SHOW GRANTS FOR 'user123456'@'localhost';
--connection node_1
DROP USER 'user123456'@'localhost';

View File

@ -10,9 +10,11 @@
--source include/big_test.inc
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/force_restart.inc
SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--let $wait_condition = SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
SELECT VARIABLE_VALUE AS EXPECT_4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--connection node_1
CREATE TABLE t1 (f1 INTEGER);
@ -37,10 +39,11 @@ INSERT INTO t1 VALUES (4);
INSERT INTO t1 VALUES (13);
--source include/kill_galera.inc
--sleep 5
--connection node_1
--source include/wait_until_connected_again.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
INSERT INTO t1 VALUES (11);
--connection node_2
@ -51,9 +54,11 @@ INSERT INTO t1 VALUES (14);
--connection node_3
--source include/start_mysqld.inc
--sleep 5
--source include/wait_until_connected_again.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
INSERT INTO t1 VALUES (131);
#
@ -64,10 +69,12 @@ INSERT INTO t1 VALUES (131);
INSERT INTO t1 VALUES (22);
--source include/kill_galera.inc
--sleep 5
--connection node_1
--source include/wait_until_connected_again.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
INSERT INTO t1 VALUES (21);
--connection node_3
@ -78,8 +85,9 @@ INSERT INTO t1 VALUES (24);
--connection node_2
--source include/start_mysqld.inc
--sleep 5
--source include/wait_until_connected_again.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
INSERT INTO t1 VALUES (221);
@ -91,10 +99,11 @@ INSERT INTO t1 VALUES (221);
INSERT INTO t1 VALUES (34);
--source include/kill_galera.inc
--sleep 5
--connection node_1
--source include/wait_until_connected_again.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
INSERT INTO t1 VALUES (31);
--connection node_2
@ -105,8 +114,9 @@ INSERT INTO t1 VALUES (33);
--connection node_4
--source include/start_mysqld.inc
--sleep 5
--source include/wait_until_connected_again.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
INSERT INTO t1 VALUES (341);
@ -119,19 +129,19 @@ INSERT INTO t1 VALUES (341);
--let $wait_condition = SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
SELECT COUNT(*) = 19 FROM t1;
SELECT COUNT(*) AS EXPECT_19 FROM t1;
--connection node_2
SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
SELECT COUNT(*) = 19 FROM t1;
SELECT VARIABLE_VALUE AS EXPECT_4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
SELECT COUNT(*) AS EXPECT_19 FROM t1;
--connection node_3
SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
SELECT COUNT(*) = 19 FROM t1;
SELECT VARIABLE_VALUE AS EXPECT_4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
SELECT COUNT(*) AS EXPECT_19 FROM t1;
--connection node_4
SELECT VARIABLE_VALUE = 4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
SELECT COUNT(*) = 19 FROM t1;
SELECT VARIABLE_VALUE AS EXPECT_4 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
SELECT COUNT(*) AS EXPECT_19 FROM t1;
--connection node_1
DROP TABLE t1;

View File

@ -809,4 +809,12 @@ eval LOAD DATA INFILE '$MYSQLTEST_VARDIR/tmp/load.data' REPLACE INTO TABLE t1 (p
--remove_file $MYSQLTEST_VARDIR/tmp/load.data
DROP TABLE t1;
--echo # MDEV-19011 Assertion `file->s->base.reclength < file->s->vreclength'
--echo # failed in ha_myisam::setup_vcols_for_repair
CREATE TABLE t1 (a INT GENERATED ALWAYS AS (1) VIRTUAL) ENGINE=MyISAM;
ALTER TABLE t1 ADD KEY (a);
DROP TABLE t1;
}

View File

@ -669,3 +669,77 @@ PRIMARY KEY (number)
REPLACE t2(number) VALUES('1');
REPLACE t2(number) VALUES('1');
DROP TABLE t2;
# MDEV-24583 SELECT aborts after failed REPLACE into table with vcol
CREATE TABLE t1 (pk INT, a VARCHAR(3), v VARCHAR(3) AS (CONCAT('x-',a)),
PRIMARY KEY(pk)) ENGINE=MyISAM;
CREATE VIEW v1 AS SELECT * FROM t1;
INSERT INTO t1 (pk, a) VALUES (1,'foo');
SET sql_mode=CONCAT(@@sql_mode,',STRICT_ALL_TABLES');
REPLACE INTO t1 (pk,a) VALUES (1,'qux');
SELECT * FROM v1;
pk a v
1 foo x-f
DROP VIEW v1;
DROP TABLE t1;
CREATE TABLE t1 (
pk INT,
a VARCHAR(1),
v VARCHAR(1) AS (CONCAT('virt-',a)) VIRTUAL,
PRIMARY KEY (pk)
) ENGINE=InnoDB;
INSERT INTO t1 (pk,a) VALUES
(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'f');
REPLACE INTO t1 (pk) VALUES (1);
ERROR 22001: Data too long for column 'v' at row 1
SELECT * FROM t1 ORDER BY a;
pk a v
1 a v
2 b v
3 c v
4 d v
5 e v
6 f v
SET SQL_MODE=DEFAULT;
DROP TABLE t1;
# (duplicate) MDEV-24656
# [FATAL] InnoDB: Data field type 0, len 0, ASAN heap-buffer-overflow
# upon LOAD DATA with virtual columns
CREATE TABLE t1 (id INT PRIMARY KEY, a VARCHAR(2333),
va VARCHAR(171) AS (a)) ENGINE=InnoDB;
INSERT INTO t1 (id,a) VALUES (1,REPEAT('x',200));
SELECT id, va INTO OUTFILE 'load_t1' FROM t1;
LOAD DATA INFILE 'load_t1' REPLACE INTO TABLE t1 (id,va);
ERROR 22001: Data too long for column 'va' at row 1
SELECT * FROM t1;
id a va
1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
LOAD DATA INFILE 'load_t1' IGNORE INTO TABLE t1 (id,va);
Warnings:
Warning 1062 Duplicate entry '1' for key 'PRIMARY'
DROP TABLE t1;
CREATE TABLE t1 (id BIGINT PRIMARY KEY, a VARCHAR(2333),
va VARCHAR(171) AS (a)) ENGINE=InnoDB;
INSERT INTO t1 (id,a) VALUES (1,REPEAT('x',200));
SELECT id, va INTO OUTFILE 'load_t1' FROM t1;
LOAD DATA INFILE 'load_t1' REPLACE INTO TABLE t1 (id,va);
ERROR 22001: Data too long for column 'va' at row 1
SELECT * FROM t1;
id a va
1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
LOAD DATA INFILE 'load_t1' IGNORE INTO TABLE t1 (id,va);
Warnings:
Warning 1062 Duplicate entry '1' for key 'PRIMARY'
DROP TABLE t1;
# (duplicate) MDEV-24665
# ASAN errors, assertion failures, corrupt values after failed
# LOAD DATA into table with virtual/stored column
CREATE TABLE t1 (id INT PRIMARY KEY,
ts TIMESTAMP DEFAULT '1971-01-01 00:00:00',
c VARBINARY(8) DEFAULT '', vc VARCHAR(3) AS (c) STORED);
INSERT IGNORE INTO t1 (id,c) VALUES (1,'foobar');
Warnings:
Warning 1265 Data truncated for column 'vc' at row 1
SELECT id, ts, vc INTO OUTFILE 'load_t1' FROM t1;
LOAD DATA INFILE 'load_t1' REPLACE INTO TABLE t1 (id, ts, vc);
INSERT IGNORE INTO t1 (id) VALUES (2);
DROP TABLE t1;

View File

@ -877,6 +877,11 @@ Warning 1264 Out of range value for column 'vi' at row 1
LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/load.data' REPLACE INTO TABLE t1 (pk,i,ts);
ERROR 22003: Out of range value for column 'vi' at row 1
DROP TABLE t1;
# MDEV-19011 Assertion `file->s->base.reclength < file->s->vreclength'
# failed in ha_myisam::setup_vcols_for_repair
CREATE TABLE t1 (a INT GENERATED ALWAYS AS (1) VIRTUAL) ENGINE=MyISAM;
ALTER TABLE t1 ADD KEY (a);
DROP TABLE t1;
#
# BUG#21365158 WL8149:ASSERTION `!TABLE || (!TABLE->WRITE_SET
#

View File

@ -877,6 +877,11 @@ Warning 1264 Out of range value for column 'vi' at row 1
LOAD DATA INFILE 'MYSQLTEST_VARDIR/tmp/load.data' REPLACE INTO TABLE t1 (pk,i,ts);
ERROR 22003: Out of range value for column 'vi' at row 1
DROP TABLE t1;
# MDEV-19011 Assertion `file->s->base.reclength < file->s->vreclength'
# failed in ha_myisam::setup_vcols_for_repair
CREATE TABLE t1 (a INT GENERATED ALWAYS AS (1) VIRTUAL) ENGINE=MyISAM;
ALTER TABLE t1 ADD KEY (a);
DROP TABLE t1;
DROP VIEW IF EXISTS v1,v2;
DROP TABLE IF EXISTS t1,t2,t3;
DROP PROCEDURE IF EXISTS p1;

View File

@ -233,4 +233,76 @@ set debug_sync= "now WAIT_FOR got_no_such_table";
set global debug_dbug= @saved_dbug;
drop table t1;
set debug_sync=reset;
#
# MDEV-18546 ASAN heap-use-after-free
# in innobase_get_computed_value / row_purge
#
CREATE TABLE t1 (
pk INT AUTO_INCREMENT,
b BIT(15),
v BIT(15) AS (b) VIRTUAL,
PRIMARY KEY(pk),
UNIQUE(v)
) ENGINE=InnoDB;
INSERT IGNORE INTO t1 (b) VALUES
(NULL),(b'011'),(b'000110100'),
(b'01101101010'),(b'01111001001011'),(NULL);
SET GLOBAL innodb_debug_sync = "ib_clust_v_col_before_row_allocated "
"SIGNAL before_row_allocated "
"WAIT_FOR flush_unlock";
SET GLOBAL innodb_debug_sync = "ib_open_after_dict_open "
"SIGNAL purge_open "
"WAIT_FOR select_open";
set global debug_dbug= "d,ib_purge_virtual_index_callback";
connect purge_waiter,localhost,root;
SET debug_sync= "now WAIT_FOR before_row_allocated";
connection default;
REPLACE INTO t1 (pk, b) SELECT pk, b FROM t1;
connection purge_waiter;
connection default;
disconnect purge_waiter;
FLUSH TABLES;
SET GLOBAL innodb_debug_sync = reset;
SET debug_sync= "now SIGNAL flush_unlock WAIT_FOR purge_open";
SET GLOBAL innodb_debug_sync = reset;
SET debug_sync= "ib_open_after_dict_open SIGNAL select_open";
SELECT * FROM t1;
pk b v
1 NULL NULL
2  
3 4 4
4 j j
5 K K
6 NULL NULL
DROP TABLE t1;
SET debug_sync= reset;
set global debug_dbug= @old_dbug;
# MDEV-16962 Assertion '!error || !ot_ctx.can_recover_from_failed_open()'
# failed in open_purge_table upon concurrent ALTER and FLUSH
CREATE TABLE t1 (
pk SERIAL,
c VARCHAR(128),
d DATE,
vd DATE AS (d) VIRTUAL,
PRIMARY KEY(pk),
KEY(vd,c)
) ENGINE=InnoDB;
INSERT IGNORE INTO t1 (pk,c) VALUES (1,'foo');
set debug_sync="now WAIT_FOR purge";
connect con1,localhost,root,,test;
SET GLOBAL innodb_debug_sync="after_open_table_mdl_shared SIGNAL purge WAIT_FOR flush";
SET global debug_dbug="d,ib_purge_virtual_index_callback";
REPLACE INTO t1 (pk,c) VALUES (1,'bar');
connection default;
SET debug_sync="alter_table_before_rename_result_table WAIT_FOR flush";
ALTER TABLE t1 ADD FULLTEXT KEY(c), ALGORITHM=COPY;
connection con1;
SET debug_sync="after_flush_unlock SIGNAL flush ";
FLUSH TABLES;
disconnect con1;
connection default;
InnoDB 0 transactions not purged
DROP TABLE t1;
SET debug_sync= reset;
SET global debug_dbug=@old_dbug;
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;

View File

@ -634,3 +634,87 @@ REPLACE t2(number) VALUES('1');
REPLACE t2(number) VALUES('1');
DROP TABLE t2;
--echo # MDEV-24583 SELECT aborts after failed REPLACE into table with vcol
CREATE TABLE t1 (pk INT, a VARCHAR(3), v VARCHAR(3) AS (CONCAT('x-',a)),
PRIMARY KEY(pk)) ENGINE=MyISAM;
CREATE VIEW v1 AS SELECT * FROM t1;
INSERT INTO t1 (pk, a) VALUES (1,'foo');
SET sql_mode=CONCAT(@@sql_mode,',STRICT_ALL_TABLES');
--error 0,ER_DATA_TOO_LONG
REPLACE INTO t1 (pk,a) VALUES (1,'qux');
SELECT * FROM v1;
# Cleanup
DROP VIEW v1;
DROP TABLE t1;
CREATE TABLE t1 (
pk INT,
a VARCHAR(1),
v VARCHAR(1) AS (CONCAT('virt-',a)) VIRTUAL,
PRIMARY KEY (pk)
) ENGINE=InnoDB;
INSERT INTO t1 (pk,a) VALUES
(1,'a'),(2,'b'),(3,'c'),(4,'d'),(5,'e'),(6,'f');
--error ER_DATA_TOO_LONG
REPLACE INTO t1 (pk) VALUES (1);
SELECT * FROM t1 ORDER BY a;
SET SQL_MODE=DEFAULT;
DROP TABLE t1;
--echo # (duplicate) MDEV-24656
--echo # [FATAL] InnoDB: Data field type 0, len 0, ASAN heap-buffer-overflow
--echo # upon LOAD DATA with virtual columns
CREATE TABLE t1 (id INT PRIMARY KEY, a VARCHAR(2333),
va VARCHAR(171) AS (a)) ENGINE=InnoDB;
INSERT INTO t1 (id,a) VALUES (1,REPEAT('x',200));
SELECT id, va INTO OUTFILE 'load_t1' FROM t1;
--error ER_DATA_TOO_LONG
LOAD DATA INFILE 'load_t1' REPLACE INTO TABLE t1 (id,va);
SELECT * FROM t1;
LOAD DATA INFILE 'load_t1' IGNORE INTO TABLE t1 (id,va);
DROP TABLE t1;
--let $datadir= `select @@datadir`
--remove_file $datadir/test/load_t1
CREATE TABLE t1 (id BIGINT PRIMARY KEY, a VARCHAR(2333),
va VARCHAR(171) AS (a)) ENGINE=InnoDB;
INSERT INTO t1 (id,a) VALUES (1,REPEAT('x',200));
SELECT id, va INTO OUTFILE 'load_t1' FROM t1;
--error ER_DATA_TOO_LONG
LOAD DATA INFILE 'load_t1' REPLACE INTO TABLE t1 (id,va);
SELECT * FROM t1;
LOAD DATA INFILE 'load_t1' IGNORE INTO TABLE t1 (id,va);
# Cleanup
DROP TABLE t1;
--let $datadir= `select @@datadir`
--remove_file $datadir/test/load_t1
--echo # (duplicate) MDEV-24665
--echo # ASAN errors, assertion failures, corrupt values after failed
--echo # LOAD DATA into table with virtual/stored column
CREATE TABLE t1 (id INT PRIMARY KEY,
ts TIMESTAMP DEFAULT '1971-01-01 00:00:00',
c VARBINARY(8) DEFAULT '', vc VARCHAR(3) AS (c) STORED);
INSERT IGNORE INTO t1 (id,c) VALUES (1,'foobar');
SELECT id, ts, vc INTO OUTFILE 'load_t1' FROM t1;
--error 0,ER_DATA_TOO_LONG
LOAD DATA INFILE 'load_t1' REPLACE INTO TABLE t1 (id, ts, vc);
INSERT IGNORE INTO t1 (id) VALUES (2);
# Cleanup
DROP TABLE t1;
--let $datadir= `select @@datadir`
--remove_file $datadir/test/load_t1

View File

@ -310,4 +310,115 @@ drop table t1;
--source include/wait_until_count_sessions.inc
set debug_sync=reset;
--echo #
--echo # MDEV-18546 ASAN heap-use-after-free
--echo # in innobase_get_computed_value / row_purge
--echo #
CREATE TABLE t1 (
pk INT AUTO_INCREMENT,
b BIT(15),
v BIT(15) AS (b) VIRTUAL,
PRIMARY KEY(pk),
UNIQUE(v)
) ENGINE=InnoDB;
INSERT IGNORE INTO t1 (b) VALUES
(NULL),(b'011'),(b'000110100'),
(b'01101101010'),(b'01111001001011'),(NULL);
SET GLOBAL innodb_debug_sync = "ib_clust_v_col_before_row_allocated "
"SIGNAL before_row_allocated "
"WAIT_FOR flush_unlock";
SET GLOBAL innodb_debug_sync = "ib_open_after_dict_open "
"SIGNAL purge_open "
"WAIT_FOR select_open";
# In 10.2 the trx_undo_roll_ptr_is_insert(t_roll_ptr) condition never passes in purge,
# so this condition is forced to pass in row_vers_old_has_index_entry
set global debug_dbug= "d,ib_purge_virtual_index_callback";
# The purge starts from REPLACE command. To avoid possible race, separate
# connection is used.
--connect(purge_waiter,localhost,root)
--send
SET debug_sync= "now WAIT_FOR before_row_allocated";
--connection default
REPLACE INTO t1 (pk, b) SELECT pk, b FROM t1;
--connection purge_waiter
# Now we will definitely catch ib_clust_v_col_before_row_allocated
--reap
--connection default
--disconnect purge_waiter
# purge hangs on the sync point. table is purged, ref_count is set to 0
FLUSH TABLES;
# Avoid hang on repeating purge.
# The reset will be applied after the first record is purged
SET GLOBAL innodb_debug_sync = reset;
SET debug_sync= "now SIGNAL flush_unlock WAIT_FOR purge_open";
# Avoid hang on repeating purge
SET GLOBAL innodb_debug_sync = reset;
# select unblocks purge thread
SET debug_sync= "ib_open_after_dict_open SIGNAL select_open";
SELECT * FROM t1;
# Cleanup
DROP TABLE t1;
SET debug_sync= reset;
set global debug_dbug= @old_dbug;
--echo # MDEV-16962 Assertion '!error || !ot_ctx.can_recover_from_failed_open()'
--echo # failed in open_purge_table upon concurrent ALTER and FLUSH
CREATE TABLE t1 (
pk SERIAL,
c VARCHAR(128),
d DATE,
vd DATE AS (d) VIRTUAL,
PRIMARY KEY(pk),
KEY(vd,c)
) ENGINE=InnoDB;
INSERT IGNORE INTO t1 (pk,c) VALUES (1,'foo');
--send
set debug_sync="now WAIT_FOR purge";
--connect (con1,localhost,root,,test)
# Will break innodb purge thread inside open_purge_table after mdl
# acquired, but before tdc->flushed check
SET GLOBAL innodb_debug_sync="after_open_table_mdl_shared SIGNAL purge WAIT_FOR flush";
# Workaround to pass trx_undo_roll_ptr_is_insert() in 10.2
SET global debug_dbug="d,ib_purge_virtual_index_callback";
REPLACE INTO t1 (pk,c) VALUES (1,'bar');
--connection default
# wait for MDL acquired by purge
--reap
# MDL_SHARED will be acquired, but will hang before MDL upgrade started.
SET debug_sync="alter_table_before_rename_result_table WAIT_FOR flush";
--send
ALTER TABLE t1 ADD FULLTEXT KEY(c), ALGORITHM=COPY;
--connection con1
# Will hang after tdc->flushed is set, but before emptying tdc cache.
SET debug_sync="after_flush_unlock SIGNAL flush ";
FLUSH TABLES;
# Cleanup
--disconnect con1
--connection default
--reap
--source ../../innodb/include/wait_all_purged.inc
DROP TABLE t1;
SET debug_sync= reset;
SET global debug_dbug=@old_dbug;
SET GLOBAL innodb_purge_rseg_truncate_frequency = @saved_frequency;

View File

@ -1,3 +1,7 @@
call mtr.add_suppression("Cannot find index f2 in InnoDB index dictionary.");
call mtr.add_suppression("InnoDB indexes are inconsistent with what defined in .frm for table .*");
call mtr.add_suppression("Table test/t1 contains 1 indexes inside InnoDB, which is different from the number of indexes 2 defined in the MariaDB .*");
call mtr.add_suppression("InnoDB could not find key no 1 with name f2 from dict cache for table .*");
#
# Bug #18734396 INNODB IN-PLACE ALTER FAILURES BLOCK FUTURE ALTERS
#
@ -24,3 +28,39 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`f2`,`f1`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
drop table t1;
#
# MDEV-22928 InnoDB fails to fetch index type
# when index mismatch
#
CREATE TABLE t1(f1 INT NOT NULL, f2 INT NOT NULL,
index(f1), index(f2))ENGINE=InnoDB;
INSERT INTO t1 VALUES(1, 1), (2, 2);
connect con1,localhost,root,,test;
SET DEBUG_SYNC="alter_table_inplace_after_commit SIGNAL default_signal WAIT_FOR default_done";
ALTER TABLE t1 DROP INDEX f2, ALGORITHM=INPLACE;
connection default;
set DEBUG_SYNC="now WAIT_FOR default_signal";
disconnect con1;
SHOW KEYS FROM t1;
Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment
t1 1 f1 1 f1 A 2 NULL NULL BTREE
t1 1 f2 1 f2 A NULL NULL NULL Corrupted
Warnings:
Warning 1082 InnoDB: Table test/t1 contains 1 indexes inside InnoDB, which is different from the number of indexes 2 defined in the MariaDB
Warning 1082 InnoDB: Table test/t1 contains 1 indexes inside InnoDB, which is different from the number of indexes 2 defined in the MariaDB
DROP TABLE t1;
#
# MDEV-25503 InnoDB hangs on startup during recovery
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1;
connect con1,localhost,root,,;
BEGIN;
DELETE FROM mysql.innodb_table_stats;
connect con2,localhost,root,,;
SET DEBUG_SYNC='inplace_after_index_build SIGNAL blocked WAIT_FOR ever';
ALTER TABLE t1 FORCE;
connection default;
SET DEBUG_SYNC='now WAIT_FOR blocked';
SELECT * FROM t1;
a
DROP TABLE t1;

View File

@ -21,7 +21,6 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES
WHERE engine = 'innodb'
AND support IN ('YES', 'DEFAULT', 'ENABLED');
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
FOUND 1 /InnoDB: Ignoring data file '.*t1.ibd' with space ID/ in mysqld.1.err
FOUND 1 /InnoDB: Tablespace \d+ was not found at.*t3.ibd/ in mysqld.1.err
# Fault 3: Wrong space_id in a dirty file, and no missing file.
SELECT * FROM INFORMATION_SCHEMA.ENGINES

View File

@ -12,6 +12,14 @@
--source include/innodb_page_size.inc
--source include/have_debug_sync.inc
call mtr.add_suppression("Cannot find index f2 in InnoDB index dictionary.");
call mtr.add_suppression("InnoDB indexes are inconsistent with what defined in .frm for table .*");
call mtr.add_suppression("Table test/t1 contains 1 indexes inside InnoDB, which is different from the number of indexes 2 defined in the MariaDB .*");
call mtr.add_suppression("InnoDB could not find key no 1 with name f2 from dict cache for table .*");
--echo #
--echo # Bug #18734396 INNODB IN-PLACE ALTER FAILURES BLOCK FUTURE ALTERS
--echo #
@ -41,3 +49,44 @@ show create table t1;
ALTER TABLE t1 ADD PRIMARY KEY (f2, f1);
show create table t1;
drop table t1;
--echo #
--echo # MDEV-22928 InnoDB fails to fetch index type
--echo # when index mismatch
--echo #
CREATE TABLE t1(f1 INT NOT NULL, f2 INT NOT NULL,
index(f1), index(f2))ENGINE=InnoDB;
INSERT INTO t1 VALUES(1, 1), (2, 2);
connect (con1,localhost,root,,test);
SET DEBUG_SYNC="alter_table_inplace_after_commit SIGNAL default_signal WAIT_FOR default_done";
--send
ALTER TABLE t1 DROP INDEX f2, ALGORITHM=INPLACE;
connection default;
set DEBUG_SYNC="now WAIT_FOR default_signal";
--let $shutdown_timeout=0
--source include/restart_mysqld.inc
disconnect con1;
SHOW KEYS FROM t1;
DROP TABLE t1;
remove_files_wildcard $datadir/test #sql-*.frm;
--echo #
--echo # MDEV-25503 InnoDB hangs on startup during recovery
--echo #
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1;
connect (con1,localhost,root,,);
BEGIN;
DELETE FROM mysql.innodb_table_stats;
connect (con2,localhost,root,,);
SET DEBUG_SYNC='inplace_after_index_build SIGNAL blocked WAIT_FOR ever';
send ALTER TABLE t1 FORCE;
connection default;
SET DEBUG_SYNC='now WAIT_FOR blocked';
--let $shutdown_timeout=0
--source include/restart_mysqld.inc
SELECT * FROM t1;
DROP TABLE t1;
remove_files_wildcard $datadir/test #sql-*.frm;

View File

@ -54,9 +54,6 @@ let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t2.ibd' with space ID \d+. Ano
--source include/start_mysqld.inc
eval $check_no_innodb;
let SEARCH_PATTERN= InnoDB: Ignoring data file '.*t1.ibd' with space ID;
--source include/search_pattern_in_file.inc
let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at.*t3.ibd;
--source include/search_pattern_in_file.inc

View File

@ -958,3 +958,13 @@ UPDATE t1 SET f6='cascade';
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t1`, CONSTRAINT `t1_ibfk_3` FOREIGN KEY (`f5`) REFERENCES `t1` (`f6`) ON UPDATE SET NULL)
DROP TABLE t1;
SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
#
# MDEV-25536 sym_node->table != NULL in pars_retrieve_table_def
#
CREATE TABLE t1 (f1 TEXT,FULLTEXT (f1)) ENGINE=InnoDB;
ALTER TABLE t1 DISCARD TABLESPACE;
SET GLOBAL innodb_ft_aux_table='test/t1';
SELECT * FROM information_schema.innodb_ft_deleted;
DOC_ID
DROP TABLE t1;
SET GLOBAL innodb_ft_aux_table=DEFAULT;

View File

@ -0,0 +1 @@
--innodb-ft-deleted

View File

@ -932,3 +932,13 @@ UPDATE t1 SET f6='cascade';
DROP TABLE t1;
SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
--echo #
--echo # MDEV-25536 sym_node->table != NULL in pars_retrieve_table_def
--echo #
CREATE TABLE t1 (f1 TEXT,FULLTEXT (f1)) ENGINE=InnoDB;
ALTER TABLE t1 DISCARD TABLESPACE;
SET GLOBAL innodb_ft_aux_table='test/t1';
SELECT * FROM information_schema.innodb_ft_deleted;
DROP TABLE t1;
SET GLOBAL innodb_ft_aux_table=DEFAULT;

View File

@ -0,0 +1,17 @@
include/master-slave.inc
[connection master]
connection master;
SET @saved_dbug = @@GLOBAL.debug_dbug;
SET @@global.debug_dbug= 'd,simulate_pos_4G';
connection slave;
include/stop_slave.inc
CHANGE MASTER TO MASTER_HEARTBEAT_PERIOD=0.001;
include/start_slave.inc
connection master;
SET @@GLOBAL.debug_dbug = @saved_dbug;
connection slave;
connection master;
CREATE TABLE t (f INT) ENGINE=INNODB;
INSERT INTO t VALUES (10);
DROP TABLE t;
include/rpl_end.inc

View File

@ -0,0 +1,44 @@
# ==== Purpose ====
#
# Test verifies that slave IO thread can process heartbeat events with log_pos
# values higher than UINT32_MAX.
#
# ==== Implementation ====
#
# Steps:
# 0 - Stop slave threads. Configure a small master_heartbeat_period.
# 1 - Using debug points, simulate a huge binlog offset higher than
# UINT32_MAX on master.
# 2 - Start the slave and observe that slave IO thread is able to process
# the offset received through heartbeat event.
#
# ==== References ====
#
# MDEV-16146: MariaDB slave stops with incompatible heartbeat
#
--source include/have_debug.inc
--source include/have_innodb.inc
--source include/have_binlog_format_mixed.inc
# Test simulates binary log offsets higher than UINT32_MAX
--source include/have_64bit.inc
--source include/master-slave.inc
--connection master
SET @saved_dbug = @@GLOBAL.debug_dbug;
SET @@global.debug_dbug= 'd,simulate_pos_4G';
--connection slave
--source include/stop_slave.inc
CHANGE MASTER TO MASTER_HEARTBEAT_PERIOD=0.001;
--source include/start_slave.inc
--connection master
sleep 1;
SET @@GLOBAL.debug_dbug = @saved_dbug;
--sync_slave_with_master
--connection master
CREATE TABLE t (f INT) ENGINE=INNODB;
INSERT INTO t VALUES (10);
DROP TABLE t;
--source include/rpl_end.inc

View File

@ -537,6 +537,18 @@ NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
VARIABLE_NAME INNODB_DEBUG_SYNC
SESSION_VALUE NULL
DEFAULT_VALUE
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE VARCHAR
VARIABLE_COMMENT debug_sync for innodb purge threads. Use it to set up sync points for all purge threads at once. The commands will be applied sequentially at the beginning of purging the next undo record.
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT NONE
VARIABLE_NAME INNODB_DEFAULT_ENCRYPTION_KEY_ID
SESSION_VALUE 1
DEFAULT_VALUE 1

View File

@ -67,4 +67,17 @@ connection master;
DROP VIEW v1;
set @@binlog_row_image=default;
DROP TABLE t1;
SET SQL_MODE=default;
CREATE TABLE t1 (pk INT, a VARCHAR(3), b VARCHAR(1) AS (a) VIRTUAL, PRIMARY KEY (pk));
INSERT IGNORE INTO t1 (pk, a) VALUES (1,'foo'),(2,'bar');
Warnings:
Warning 1265 Data truncated for column 'b' at row 1
Warning 1265 Data truncated for column 'b' at row 2
REPLACE INTO t1 (pk) VALUES (2);
ERROR 22001: Data too long for column 'b' at row 1
UPDATE IGNORE t1 SET a = NULL;
Warnings:
Warning 1265 Data truncated for column 'b' at row 1
Warning 1265 Data truncated for column 'b' at row 2
DROP TABLE t1;
include/rpl_end.inc

View File

@ -51,5 +51,19 @@ DROP VIEW v1;
set @@binlog_row_image=default;
DROP TABLE t1;
SET SQL_MODE=default;
# MDEV-24782
# ASAN use-after-poison in Field::pack_int / THD::binlog_update_row
CREATE TABLE t1 (pk INT, a VARCHAR(3), b VARCHAR(1) AS (a) VIRTUAL, PRIMARY KEY (pk));
INSERT IGNORE INTO t1 (pk, a) VALUES (1,'foo'),(2,'bar');
--error ER_DATA_TOO_LONG
REPLACE INTO t1 (pk) VALUES (2);
UPDATE IGNORE t1 SET a = NULL;
# Cleanup
DROP TABLE t1;
--source include/rpl_end.inc

View File

@ -58,6 +58,7 @@ int my_delete(const char *name, myf MyFlags)
#if defined (_WIN32)
/*
Delete file.
@ -65,15 +66,14 @@ int my_delete(const char *name, myf MyFlags)
where another program (or thread in the current program) has the same file
open.
We're using 2 tricks to prevent the errors.
We're using several tricks to prevent the errors, such as
1. A usual Win32's DeleteFile() can fail with ERROR_SHARED_VIOLATION,
because the file is opened in another application (often, antivirus or backup)
- Windows 10 "posix semantics" delete
We avoid the error by using CreateFile() with FILE_FLAG_DELETE_ON_CLOSE, instead
- Avoid the error by using CreateFile() with FILE_FLAG_DELETE_ON_CLOSE, instead
of DeleteFile()
2. If file which is deleted (delete on close) but has not entirely gone,
- If file which is deleted (delete on close) but has not entirely gone,
because it is still opened by some app, an attempt to create a file with the
same name would result in yet another error. The workaround here is renaming
a file to unique name.
@ -116,6 +116,27 @@ static int my_win_unlink(const char *name)
DBUG_RETURN(0);
}
/*
Try Windows 10 method, delete with "posix semantics" (file is not visible, and creating
a file with the same name won't fail, even if the file was open)
*/
struct
{
DWORD _Flags;
} disp={0x3};
/* 0x3 = FILE_DISPOSITION_FLAG_DELETE | FILE_DISPOSITION_FLAG_POSIX_SEMANTICS */
handle= CreateFile(name, DELETE, FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
NULL, OPEN_EXISTING, 0, NULL);
if (handle != INVALID_HANDLE_VALUE)
{
BOOL ok= SetFileInformationByHandle(handle,
(FILE_INFO_BY_HANDLE_CLASS) 21, &disp, sizeof(disp));
CloseHandle(handle);
if (ok)
DBUG_RETURN(0);
}
handle= CreateFile(name, DELETE, 0, NULL, OPEN_EXISTING, FILE_FLAG_DELETE_ON_CLOSE, NULL);
if (handle != INVALID_HANDLE_VALUE)
{

View File

@ -572,12 +572,8 @@ then
echo
echo
echo "PLEASE REMEMBER TO SET A PASSWORD FOR THE MariaDB root USER !"
echo "To do so, start the server, then issue the following commands:"
echo "To do so, start the server, then issue the following command:"
echo
echo "'$bindir/mysqladmin' -u root password 'new-password'"
echo "'$bindir/mysqladmin' -u root -h $hostname password 'new-password'"
echo
echo "Alternatively you can run:"
echo "'$bindir/mysql_secure_installation'"
echo
echo "which will also give you the option of removing the test"

View File

@ -980,8 +980,9 @@ public:
virtual void reset_fields() {}
const uchar *ptr_in_record(const uchar *record) const
{
my_ptrdiff_t l_offset= (my_ptrdiff_t) (record - table->record[0]);
return ptr + l_offset;
my_ptrdiff_t l_offset= (my_ptrdiff_t) (ptr - table->record[0]);
DBUG_ASSERT(l_offset >= 0 && table->s->rec_buff_length - l_offset > 0);
return record + l_offset;
}
virtual int set_default();
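A minimal stand-alone sketch of the pointer arithmetic this hunk corrects (FakeTable/FakeField and the fixed 64-byte buffers are illustrative assumptions, not the server's real TABLE/Field layout): the field offset is derived from the field's own ptr relative to record[0], then re-based onto whichever record buffer the caller passes in.

#include <cassert>
#include <cstddef>

// Illustrative stand-ins for TABLE/Field, only to show the offset computation.
struct FakeTable {
  unsigned char record0[64];      // plays the role of table->record[0]
  unsigned char record1[64];      // plays the role of table->record[1]
  size_t rec_buff_length= 64;
};

struct FakeField {
  FakeTable *table;
  unsigned char *ptr;             // points into table->record0

  // Same idea as the fixed ptr_in_record(): the offset comes from ptr, not
  // from the record argument, so it works for record[1] or any row buffer.
  const unsigned char *ptr_in_record(const unsigned char *record) const
  {
    ptrdiff_t l_offset= ptr - table->record0;
    assert(l_offset >= 0 && (ptrdiff_t) table->rec_buff_length - l_offset > 0);
    return record + l_offset;
  }
};

int main()
{
  FakeTable t;
  FakeField f= { &t, t.record0 + 8 };       // field stored at offset 8
  return f.ptr_in_record(t.record1) == t.record1 + 8 ? 0 : 1;
}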

View File

@ -15097,14 +15097,23 @@ bool copy_event_cache_to_file_and_reinit(IO_CACHE *cache, FILE *file)
}
#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT)
Heartbeat_log_event::Heartbeat_log_event(const char* buf, uint event_len,
Heartbeat_log_event::Heartbeat_log_event(const char* buf, ulong event_len,
const Format_description_log_event* description_event)
:Log_event(buf, description_event)
{
uint8 header_size= description_event->common_header_len;
ident_len = event_len - header_size;
set_if_smaller(ident_len,FN_REFLEN-1);
log_ident= buf + header_size;
if (log_pos == 0)
{
log_pos= uint8korr(buf + header_size);
log_ident= buf + header_size + HB_SUB_HEADER_LEN;
ident_len= event_len - (header_size + HB_SUB_HEADER_LEN);
}
else
{
log_ident= buf + header_size;
ident_len = event_len - header_size;
}
}
#endif
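A stand-alone decoding sketch of the convention the new constructor implements (the offset 13 for the log_pos field, the little-endian storage, and the helper names with a _SKETCH suffix are assumptions mirroring the binlog layout used in this hunk, not the server's real macros; any trailing checksum bytes are ignored for brevity): a zero 32-bit log_pos in the common header signals that the real 64-bit position follows in an 8-byte sub-header, with the log file name after that.

#include <cstdint>
#include <cstddef>
#include <string>

static const size_t LOG_POS_OFFSET_SKETCH= 13;   // assumed position of log_pos in the header
static const size_t HB_SUB_HEADER_LEN_SKETCH= 8;

static uint32_t read_le32(const unsigned char *p)
{
  return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
         ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
}

static uint64_t read_le64(const unsigned char *p)
{
  uint64_t v= 0;
  for (int i= 7; i >= 0; i--)
    v= (v << 8) | p[i];
  return v;
}

// Returns the (possibly >4GB) log position and extracts the log file name.
static uint64_t decode_heartbeat(const unsigned char *buf, size_t event_len,
                                 size_t common_header_len, std::string *log_ident)
{
  uint64_t log_pos= read_le32(buf + LOG_POS_OFFSET_SKETCH);
  size_t body= common_header_len;
  if (log_pos == 0)                        // overflow marker set by the sender
  {
    log_pos= read_le64(buf + body);        // 64-bit position in the sub-header
    body+= HB_SUB_HEADER_LEN_SKETCH;
  }
  log_ident->assign((const char *) buf + body, event_len - body);
  return log_pos;
}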

View File

@ -570,6 +570,14 @@ class String;
#define MARIA_SLAVE_CAPABILITY_MINE MARIA_SLAVE_CAPABILITY_GTID
/*
When 'log_pos' of a Heartbeat_log_event exceeds UINT32_MAX it cannot be
accommodated in the common_header, as 'log_pos' there is only 4 bytes wide. In
such cases an 8-byte sub_header holds the larger 'log_pos' value.
*/
#define HB_SUB_HEADER_LEN 8
/**
@enum Log_event_type
@ -5183,12 +5191,13 @@ bool copy_cache_to_file_wrapped(IO_CACHE *body,
class Heartbeat_log_event: public Log_event
{
public:
Heartbeat_log_event(const char* buf, uint event_len,
uint8 hb_flags;
Heartbeat_log_event(const char* buf, ulong event_len,
const Format_description_log_event* description_event);
Log_event_type get_type_code() { return HEARTBEAT_LOG_EVENT; }
bool is_valid() const
{
return (log_ident != NULL &&
return (log_ident != NULL && ident_len <= FN_REFLEN-1 &&
log_pos >= BIN_LOG_HEADER_SIZE);
}
const char * get_log_ident() { return log_ident; }

View File

@ -1471,7 +1471,11 @@ static bool validate_password(LEX_USER *user, THD *thd)
else
{
if (!thd->slave_thread &&
strict_password_validation && has_validation_plugins())
strict_password_validation && has_validation_plugins()
#ifdef WITH_WSREP
&& !thd->wsrep_applier
#endif
)
{
my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--strict-password-validation");
return true;

View File

@ -4677,7 +4677,7 @@ TABLE *open_purge_table(THD *thd, const char *db, size_t dblen,
DBUG_ASSERT(thd->open_tables == NULL);
DBUG_ASSERT(thd->locked_tables_mode < LTM_PRELOCKED);
Open_table_context ot_ctx(thd, 0);
Open_table_context ot_ctx(thd, MYSQL_OPEN_IGNORE_FLUSH);
TABLE_LIST *tl= (TABLE_LIST*)thd->alloc(sizeof(TABLE_LIST));
LEX_CSTRING db_name= {db, dblen };
LEX_CSTRING table_name= { tb, tblen };

View File

@ -1798,9 +1798,10 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
in handler methods for the just read row in record[1].
*/
table->move_fields(table->field, table->record[1], table->record[0]);
if (table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_REPLACE))
goto err;
int verr = table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_REPLACE);
table->move_fields(table->field, table->record[0], table->record[1]);
if (verr)
goto err;
}
if (info->handle_duplicates == DUP_UPDATE)
{

View File

@ -766,15 +766,15 @@ void lex_end(LEX *lex)
DBUG_ENTER("lex_end");
DBUG_PRINT("enter", ("lex: %p", lex));
lex_end_stage1(lex);
lex_end_stage2(lex);
lex_unlock_plugins(lex);
lex_end_nops(lex);
DBUG_VOID_RETURN;
}
void lex_end_stage1(LEX *lex)
void lex_unlock_plugins(LEX *lex)
{
DBUG_ENTER("lex_end_stage1");
DBUG_ENTER("lex_unlock_plugins");
/* release used plugins */
if (lex->plugins.elements) /* No function call and no mutex if no plugins. */
@ -783,33 +783,23 @@ void lex_end_stage1(LEX *lex)
lex->plugins.elements);
}
reset_dynamic(&lex->plugins);
if (lex->context_analysis_only & CONTEXT_ANALYSIS_ONLY_PREPARE)
{
/*
Don't delete lex->sphead, it'll be needed for EXECUTE.
Note that of all statements that populate lex->sphead
only SQLCOM_COMPOUND can be PREPAREd
*/
DBUG_ASSERT(lex->sphead == 0 || lex->sql_command == SQLCOM_COMPOUND);
}
else
{
sp_head::destroy(lex->sphead);
lex->sphead= NULL;
}
DBUG_VOID_RETURN;
}
/*
Don't delete lex->sphead, it'll be needed for EXECUTE.
Note that of all statements that populate lex->sphead
only SQLCOM_COMPOUND can be PREPAREd
MASTER INFO parameters (or state) are normally cleared towards the end
of a statement. But in the case of PS, the state needs to be preserved during
its lifetime and should only be cleared on PS close or deallocation.
*/
void lex_end_stage2(LEX *lex)
void lex_end_nops(LEX *lex)
{
DBUG_ENTER("lex_end_stage2");
DBUG_ENTER("lex_end_nops");
sp_head::destroy(lex->sphead);
lex->sphead= NULL;
/* Reset LEX_MASTER_INFO */
lex->mi.reset(lex->sql_command == SQLCOM_CHANGE_MASTER);
@ -5448,13 +5438,33 @@ void LEX::sp_variable_declarations_init(THD *thd, int nvars)
bool LEX::sp_variable_declarations_set_default(THD *thd, int nvars,
Item *dflt_value_item)
{
if (!dflt_value_item &&
bool has_default_clause= dflt_value_item != NULL;
if (!has_default_clause &&
unlikely(!(dflt_value_item= new (thd->mem_root) Item_null(thd))))
return true;
sp_variable *first_spvar = NULL;
for (uint i= 0 ; i < (uint) nvars ; i++)
{
sp_variable *spvar= spcont->get_last_context_variable((uint) nvars - 1 - i);
if (i == 0) {
first_spvar = spvar;
} else if (has_default_clause) {
Item_splocal *item =
new (thd->mem_root)
Item_splocal(thd, &sp_rcontext_handler_local,
&first_spvar->name, first_spvar->offset,
first_spvar->type_handler(), 0, 0);
if (item == NULL)
return true; // OOM
#ifndef DBUG_OFF
item->m_sp = sphead;
#endif
dflt_value_item = item;
}
bool last= i + 1 == (uint) nvars;
spvar->default_value= dflt_value_item;
/* The last instruction is responsible for freeing LEX. */

View File

@ -4340,8 +4340,8 @@ extern void lex_init(void);
extern void lex_free(void);
extern void lex_start(THD *thd);
extern void lex_end(LEX *lex);
extern void lex_end_stage1(LEX *lex);
extern void lex_end_stage2(LEX *lex);
extern void lex_end_nops(LEX *lex);
extern void lex_unlock_plugins(LEX *lex);
void end_lex_with_single_table(THD *thd, TABLE *table, LEX *old_lex);
int init_lex_with_single_table(THD *thd, TABLE *table, LEX *lex);
extern int MYSQLlex(union YYSTYPE *yylval, THD *thd);

View File

@ -4276,8 +4276,10 @@ bool Prepared_statement::prepare(const char *packet, uint packet_len)
thd->release_transactional_locks();
}
/* Preserve CHANGE MASTER attributes */
lex_end_stage1(lex);
/* Preserve locked plugins for SET */
if (lex->sql_command != SQLCOM_SET_OPTION)
lex_unlock_plugins(lex);
cleanup_stmt();
thd->restore_backup_statement(this, &stmt_backup);
thd->stmt_arena= old_stmt_arena;
@ -5159,7 +5161,7 @@ void Prepared_statement::deallocate_immediate()
status_var_increment(thd->status_var.com_stmt_close);
/* It should now be safe to reset CHANGE MASTER parameters */
lex_end_stage2(lex);
lex_end(lex);
}

View File

@ -32,6 +32,7 @@
#include "semisync_master.h"
#include "semisync_slave.h"
enum enum_gtid_until_state {
GTID_UNTIL_NOT_DONE,
GTID_UNTIL_STOP_AFTER_STANDALONE,
@ -827,6 +828,8 @@ static int send_heartbeat_event(binlog_send_info *info,
DBUG_ENTER("send_heartbeat_event");
ulong ev_offset;
char sub_header_buf[HB_SUB_HEADER_LEN];
bool sub_header_in_use=false;
if (reset_transmit_packet(info, info->flags, &ev_offset, &info->errmsg))
DBUG_RETURN(1);
@ -847,18 +850,38 @@ static int send_heartbeat_event(binlog_send_info *info,
size_t event_len = ident_len + LOG_EVENT_HEADER_LEN +
(do_checksum ? BINLOG_CHECKSUM_LEN : 0);
int4store(header + SERVER_ID_OFFSET, global_system_variables.server_id);
DBUG_EXECUTE_IF("simulate_pos_4G",
{
const_cast<event_coordinates *>(coord)->pos= (UINT_MAX32 + (ulong)1);
DBUG_SET("-d, simulate_pos_4G");
};);
if (coord->pos <= UINT_MAX32)
{
int4store(header + LOG_POS_OFFSET, coord->pos); // log_pos
}
else
{
// Set common_header.log_pos=0 to indicate its overflow
int4store(header + LOG_POS_OFFSET, 0);
sub_header_in_use= true;
int8store(sub_header_buf, coord->pos);
event_len+= HB_SUB_HEADER_LEN;
}
int4store(header + EVENT_LEN_OFFSET, event_len);
int2store(header + FLAGS_OFFSET, 0);
int4store(header + LOG_POS_OFFSET, coord->pos); // log_pos
packet->append(header, sizeof(header));
packet->append(p, ident_len); // log_file_name
if (sub_header_in_use)
packet->append(sub_header_buf, sizeof(sub_header_buf));
packet->append(p, ident_len); // log_file_name
if (do_checksum)
{
char b[BINLOG_CHECKSUM_LEN];
ha_checksum crc= my_checksum(0, (uchar*) header, sizeof(header));
if (sub_header_in_use)
crc= my_checksum(crc, (uchar*) sub_header_buf, sizeof(sub_header_buf));
crc= my_checksum(crc, (uchar*) p, ident_len);
int4store(b, crc);
packet->append(b, sizeof(b));
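The sending side of the same convention, as a compact stand-alone sketch (again the offset 13 and little-endian stores are assumptions standing in for the server's int4store/int8store macros): a position that fits in 32 bits goes into the header field as before; otherwise the header field is zeroed and the full value is shipped in an 8-byte sub-header placed between the header and the log file name, with the event length and checksum covering the extra bytes.

#include <cstdint>
#include <cstddef>
#include <string>

static const size_t LOG_POS_OFFSET_SKETCH= 13;   // assumed header layout, as above

// Fills the position of a heartbeat event. 'header' is the common header
// under construction; the returned string is the sub-header to append right
// after it (empty when the position fits in 32 bits).
static std::string set_heartbeat_pos(unsigned char *header, uint64_t pos)
{
  std::string sub;
  uint32_t pos32= pos <= UINT32_MAX ? (uint32_t) pos : 0;   // 0 means "see sub-header"
  for (int i= 0; i < 4; i++)
    header[LOG_POS_OFFSET_SKETCH + i]= (unsigned char) (pos32 >> (8 * i));
  if (pos > UINT32_MAX)
    for (int i= 0; i < 8; i++)
      sub.push_back((char) (pos >> (8 * i)));
  // The caller then appends header, sub-header, and log file name, computes
  // the checksum over all three, and adds sub.size() to the event length.
  return sub;
}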

View File

@ -5818,11 +5818,18 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table,
/*
Ensure that we have an exclusive lock on target table if we are creating
non-temporary table.
If we're creating a non-temporary table, then either
- there is an exclusive lock on the table
or
- there was CREATE IF NOT EXISTS, and the table was not created
(it existed), and was previously locked
*/
DBUG_ASSERT((create_info->tmp_table()) ||
thd->mdl_context.is_lock_owner(MDL_key::TABLE, table->db.str,
table->table_name.str,
MDL_EXCLUSIVE));
MDL_EXCLUSIVE) ||
(thd->locked_tables_mode && pos_in_locked_tables &&
create_info->if_not_exists()));
}
DEBUG_SYNC(thd, "create_table_like_before_binlog");
@ -7788,6 +7795,8 @@ static bool mysql_inplace_alter_table(THD *thd,
goto rollback;
}
DEBUG_SYNC(thd, "alter_table_inplace_after_commit");
thd->drop_temporary_table(altered_table, NULL, false);
}

View File

@ -195,7 +195,7 @@ void udf_init()
DBUG_PRINT("info",("init udf record"));
LEX_CSTRING name;
name.str=get_field(&mem, table->field[0]);
name.length = (uint) strlen(name.str);
name.length = (uint) safe_strlen(name.str);
char *dl_name= get_field(&mem, table->field[2]);
bool new_dl=0;
Item_udftype udftype=UDFTYPE_FUNCTION;
@ -209,12 +209,12 @@ void udf_init()
On windows we must check both FN_LIBCHAR and '/'.
*/
if (check_valid_path(dl_name, strlen(dl_name)) ||
if (!name.str || !dl_name || check_valid_path(dl_name, strlen(dl_name)) ||
check_string_char_length(&name, 0, NAME_CHAR_LEN,
system_charset_info, 1))
{
sql_print_error("Invalid row in mysql.func table for function '%.64s'",
name.str);
safe_str(name.str));
continue;
}

View File

@ -1571,8 +1571,9 @@ bool Multiupdate_prelocking_strategy::handle_end(THD *thd)
call in setup_tables()).
*/
if (setup_tables(thd, &select_lex->context, &select_lex->top_join_list,
table_list, select_lex->leaf_tables, FALSE, TRUE))
if (setup_tables_and_check_access(thd, &select_lex->context,
&select_lex->top_join_list, table_list, select_lex->leaf_tables,
FALSE, UPDATE_ACL, SELECT_ACL, TRUE))
DBUG_RETURN(1);
List<Item> *fields= &lex->select_lex.item_list;

View File

@ -274,9 +274,6 @@ IF(CONNECT_WITH_JDBC)
Mongo2Interface.java Mongo3Interface.java
JavaWrappers.jar)
add_definitions(-DJAVA_SUPPORT)
IF(CONNECT_WITH_MONGO)
add_definitions(-DMONGO_SUPPORT)
ENDIF()
ELSE()
SET(JDBC_LIBRARY "")
ENDIF()

View File

@ -1138,6 +1138,9 @@ PBVAL BJSON::GetArrayValue(PBVAL bap, int n)
CheckType(bap, TYPE_JAR);
int i = 0;
if (n < 0)
n += GetArraySize(bap);
for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp), i++)
if (i == n)
return bvp;
@ -1348,12 +1351,17 @@ PBVAL BJSON::NewVal(PVAL valp)
/***********************************************************************/
/* Sub-allocate and initialize a BVAL from another BVAL. */
/***********************************************************************/
PBVAL BJSON::DupVal(PBVAL bvlp) {
PBVAL bvp = NewVal();
PBVAL BJSON::DupVal(PBVAL bvlp)
{
if (bvlp) {
PBVAL bvp = NewVal();
*bvp = *bvlp;
bvp->Next = 0;
return bvp;
} else
return NULL;
*bvp = *bvlp;
bvp->Next = 0;
return bvp;
} // end of DupVal
/***********************************************************************/

View File

@ -117,7 +117,7 @@ BJNX::BJNX(PGLOBAL g) : BDOC(g)
Jp = NULL;
Nodes = NULL;
Value = NULL;
MulVal = NULL;
//MulVal = NULL;
Jpath = NULL;
Buf_Type = TYPE_STRING;
Long = len;
@ -148,7 +148,7 @@ BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) : BDOC
Jp = NULL;
Nodes = NULL;
Value = AllocateValue(g, type, len, prec);
MulVal = NULL;
//MulVal = NULL;
Jpath = NULL;
Buf_Type = type;
Long = len;
@ -273,40 +273,6 @@ my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm)
return true;
} // endif's
#if 0
// For calculated arrays, a local Value must be used
switch (jnp->Op) {
case OP_NUM:
jnp->Valp = AllocateValue(g, TYPE_INT);
break;
case OP_ADD:
case OP_MULT:
case OP_SEP:
if (!IsTypeChar(Buf_Type))
jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision());
else
jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2);
break;
case OP_MIN:
case OP_MAX:
jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision());
break;
case OP_CNC:
if (IsTypeChar(Buf_Type))
jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision());
else
jnp->Valp = AllocateValue(g, TYPE_STRING, 512);
break;
default:
break;
} // endswitch Op
if (jnp->Valp)
MulVal = AllocateValue(g, jnp->Valp);
#endif // 0
return false;
} // end of SetArrayOptions
@ -452,6 +418,8 @@ PBVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp, int n)
{
PBVAL vlp, jvp = bvp;
Jb = false;
if (n < Nod -1) {
if (bvp->Type == TYPE_JAR) {
int ars = GetArraySize(bvp);
@ -3022,7 +2990,7 @@ void bson_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*)
PBVAL bop = (PBVAL)g->Activityp;
if (g->N-- > 0)
bxp->SetKeyValue(bop, bxp->MakeValue(args, 1), MakePSZ(g, args, 0));
bxp->SetKeyValue(bop, bxp->MakeValue(args, 1), MakePSZ(g, args, 0));
} // end of bson_object_grp_add
@ -3710,7 +3678,7 @@ char *bson_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result,
PUSH_WARNING("CheckMemory error");
goto fin;
} else {
bnx.Reset();
bnx.Reset();
jvp = bnx.MakeValue(args, 0, true);
if (g->Mrr) { // First argument is a constant
@ -4056,7 +4024,7 @@ double bsonget_real(UDF_INIT *initid, UDF_ARGS *args,
*is_null = 1;
return 0.0;
} else {
bnx.Reset();
bnx.Reset();
jvp = bnx.MakeValue(args, 0);
if ((p = bnx.GetString(jvp))) {

View File

@ -41,7 +41,6 @@ typedef struct _jnode {
PSZ Key; // The key used for object
OPVAL Op; // Operator used for this node
PVAL CncVal; // To cont value used for OP_CNC
PVAL Valp; // The internal array VALUE
int Rank; // The rank in array
int Rx; // Read row number
int Nx; // Next to read row number
@ -153,7 +152,7 @@ protected:
JOUTSTR *Jp;
JNODE *Nodes; // The intermediate objects
PVAL Value;
PVAL MulVal; // To value used by multiple column
//PVAL MulVal; // To value used by multiple column
char *Jpath; // The json path
int Buf_Type;
int Long;

View File

@ -150,6 +150,12 @@ void CMgoConn::mongo_init(bool init)
/***********************************************************************/
bool CMgoConn::Connect(PGLOBAL g)
{
if (!Pcg->Db_name || !Pcg->Coll_name) {
// This would crash in mongoc_client_get_collection
strcpy(g->Message, "Missing DB or collection name");
return true;
} // endif name
if (!IsInit)
#if defined(__WIN__)
__try {

View File

@ -170,7 +170,7 @@
#define JSONMAX 10 // JSON Default max grp size
extern "C" {
char version[]= "Version 1.07.0002 January 27, 2021";
char version[]= "Version 1.07.0002 March 22, 2021";
#if defined(__WIN__)
char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__;
char slash= '\\';
@ -275,6 +275,10 @@ static handler *connect_create_handler(handlerton *hton,
TABLE_SHARE *table,
MEM_ROOT *mem_root);
static bool checkPrivileges(THD* thd, TABTYPE type, PTOS options,
const char* db, TABLE* table = NULL,
bool quick = false);
static int connect_assisted_discovery(handlerton *hton, THD* thd,
TABLE_SHARE *table_s,
HA_CREATE_INFO *info);
@ -757,8 +761,8 @@ DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR name, LPCSTR dir)
For engines that have two file name extensions (separate meta/index file
and data file), the order of elements is relevant. First element of engine
file name extensions array should be meta/index file extention. Second
element - data file extention. This order is assumed by
file name extensions array should be meta/index file extension. Second
element - data file extension. This order is assumed by
prepare_for_repair() when REPAIR TABLE ... USE_FRM is issued.
@see
@ -1294,9 +1298,9 @@ PCSZ GetStringTableOption(PGLOBAL g, PTOS options, PCSZ opname, PCSZ sdef)
else if (!stricmp(opname, "Data_charset"))
opval= options->data_charset;
else if (!stricmp(opname, "Http") || !stricmp(opname, "URL"))
opval = options->http;
opval= options->http;
else if (!stricmp(opname, "Uri"))
opval = options->uri;
opval= options->uri;
if (!opval && options->oplist)
opval= GetListOption(g, opname, options->oplist);
@ -1610,7 +1614,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
pcf->Opt= (fop) ? (int)fop->opt : 0;
if (fp->field_length >= 0) {
pcf->Length = fp->field_length;
pcf->Length= fp->field_length;
// length is bytes for Connect, not characters
if (!strnicmp(chset, "utf8", 4))
@ -1625,7 +1629,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf)
pcf->Offset= (int)fop->offset;
pcf->Freq= (int)fop->freq;
pcf->Datefmt= (char*)fop->dateformat;
pcf->Fieldfmt = fop->fieldformat ? (char*)fop->fieldformat
pcf->Fieldfmt= fop->fieldformat ? (char*)fop->fieldformat
: fop->jsonpath ? (char*)fop->jsonpath : (char*)fop->xmlpath;
} else {
pcf->Offset= -1;
@ -4508,11 +4512,9 @@ int ha_connect::delete_all_rows()
} // end of delete_all_rows
bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool quick)
static bool checkPrivileges(THD *thd, TABTYPE type, PTOS options,
const char *db, TABLE *table, bool quick)
{
const char *db= (dbn && *dbn) ? dbn : NULL;
TABTYPE type=GetRealType(options);
switch (type) {
case TAB_UNDEF:
// case TAB_CATLG:
@ -4595,6 +4597,16 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn, bool
my_printf_error(ER_UNKNOWN_ERROR, "check_privileges failed", MYF(0));
return true;
} // end of checkPrivileges
// Check whether the user has required (file) privileges
bool ha_connect::check_privileges(THD *thd, PTOS options, const char *dbn,
bool quick)
{
const char *db= (dbn && *dbn) ? dbn : NULL;
TABTYPE type=GetRealType(options);
return checkPrivileges(thd, type, options, db, table, quick);
} // end of check_privileges
// Check that two indexes are equivalent
@ -5388,12 +5400,7 @@ static bool add_field(String* sql, TABTYPE ttp, const char* field_name, int typ,
int len, int dec, char* key, uint tm, const char* rem,
char* dft, char* xtra, char* fmt, int flag, bool dbf, char v)
{
#if defined(DEVELOPMENT)
// Some client programs regard CHAR(36) as GUID
char var = (len > 255 || len == 36) ? 'V' : v;
#else
char var = (len > 255) ? 'V' : v;
#endif
bool q, error = false;
const char* type = PLGtoMYSQLtype(typ, dbf, var);
@ -5729,6 +5736,29 @@ static int connect_assisted_discovery(handlerton *, THD* thd,
#endif // REST_SUPPORT
} // endif ttp
if (fn && *fn)
switch (ttp) {
case TAB_FMT:
case TAB_DBF:
case TAB_XML:
case TAB_INI:
case TAB_VEC:
case TAB_REST:
case TAB_JSON:
#if defined(BSON_SUPPORT)
case TAB_BSON:
#endif // BSON_SUPPORT
if (checkPrivileges(thd, ttp, topt, db)) {
strcpy(g->Message, "This operation requires the FILE privilege");
rc= HA_ERR_INTERNAL_ERROR;
goto err;
} // endif check_privileges
break;
default:
break;
} // endswitch ttp
if (!tab) {
if (ttp == TAB_TBL) {
// Make tab the first table of the list

View File

@ -1,7 +1,7 @@
/************ Javaconn C++ Functions Source Code File (.CPP) ***********/
/* Name: JAVAConn.CPP Version 1.0 */
/* Name: JAVAConn.CPP Version 1.1 */
/* */
/* (C) Copyright to the author Olivier BERTRAND 2017 */
/* (C) Copyright to the author Olivier BERTRAND 2017 - 2021 */
/* */
/* This file contains the JAVA connection classes functions. */
/***********************************************************************/
@ -400,24 +400,35 @@ bool JAVAConn::Open(PGLOBAL g)
jpop->Append(ClassPath);
} // endif ClassPath
// Java source will be compiled as a jar file installed in the plugin dir
#if 0
// Java source will be compiled as a jar file installed in the plugin dir
jpop->Append(sep);
jpop->Append(GetPluginDir());
jpop->Append("JdbcInterface.jar");
#endif // 0
// All wrappers are pre-compiled in JavaWrappers.jar in the plugin dir
jpop->Append(sep);
jpop->Append(GetPluginDir());
jpop->Append("JavaWrappers.jar");
#if defined(MONGO_SUPPORT)
jpop->Append(sep);
jpop->Append(GetPluginDir());
jpop->Append("Mongo3.jar");
jpop->Append(sep);
jpop->Append(GetPluginDir());
jpop->Append("Mongo2.jar");
#endif // MONGO_SUPPORT
if ((cp = getenv("CLASSPATH"))) {
jpop->Append(sep);
jpop->Append(cp);
} // endif cp
if (trace(1)) {
htrc("ClassPath=%s\n", ClassPath);
htrc("CLASSPATH=%s\n", cp);
htrc("ClassPath=%s\n", ClassPath ? ClassPath : "null");
htrc("CLASSPATH=%s\n", cp ? cp : "null");
htrc("%s\n", jpop->GetStr());
} // endif trace

View File

@ -121,20 +121,21 @@ JMgoConn::JMgoConn(PGLOBAL g, PCSZ collname, PCSZ wrapper)
/***********************************************************************/
void JMgoConn::AddJars(PSTRG jpop, char sep)
{
#if defined(BSON_SUPPORT)
#if defined(DEVELOPMENT)
if (m_Version == 2) {
jpop->Append(sep);
// jpop->Append("C:/Eclipse/workspace/MongoWrap2/bin");
jpop->Append(sep);
// jpop->Append(sep);
jpop->Append("C:/mongo-java-driver/mongo-java-driver-2.13.3.jar");
} else {
jpop->Append(sep);
// jpop->Append("C:/Eclipse/workspace/MongoWrap3/bin");
// jpop->Append(sep);
// jpop->Append("C:/Program Files/MariaDB 10.1/lib/plugin/JavaWrappers.jar");
jpop->Append(sep);
// jpop->Append(sep);
jpop->Append("C:/mongo-java-driver/mongo-java-driver-3.4.2.jar");
} // endif m_Version
#endif // BSON_SUPPORT
#endif // DEVELOPMENT
} // end of AddJars
/***********************************************************************/

View File

@ -54,15 +54,24 @@ char *GetExceptionDesc(PGLOBAL g, unsigned int e);
#endif // SE_CATCH
char *GetJsonNull(void);
int GetDefaultPrec(void);
/***********************************************************************/
/* IsNum: check whether this string is all digits. */
/***********************************************************************/
bool IsNum(PSZ s) {
for (char* p = s; *p; p++)
bool IsNum(PSZ s)
{
char* p = s;
if (*p == '-')
p++;
if (*p == ']')
return false;
else for (; *p; p++)
if (*p == ']')
break;
else if (!isdigit(*p) || *p == '-')
else if (!isdigit(*p))
return false;
return true;
@ -1257,6 +1266,8 @@ PJVAL JARRAY::GetArrayValue(int i)
{
if (Mvals && i >= 0 && i < Size)
return Mvals[i];
else if (Mvals && i < 0 && i >= -Size)
return Mvals[Size + i];
else
return NULL;
} // end of GetValue
@ -1752,7 +1763,7 @@ void JVALUE::SetBigint(PGLOBAL g, long long ll)
void JVALUE::SetFloat(PGLOBAL g, double f)
{
F = f;
Nd = 6;
Nd = GetDefaultPrec();
DataType = TYPE_DBL;
} // end of SetFloat
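Taken together, the IsNum() and GetArrayValue() changes above let a JSON path token such as [-2] address an array from its end. A small self-contained sketch of the relaxed digit check, assuming PSZ is a plain char* as in the CONNECT sources:

#include <cctype>
#include <cstdio>

typedef char *PSZ;                         // assumption: plain char pointer

static bool IsNumSketch(PSZ s) {
  char *p = s;
  if (*p == '-')                           // a leading minus is now tolerated
    p++;
  if (*p == ']')                           // but "]" or "-]" alone is not a number
    return false;
  for (; *p; p++)
    if (*p == ']')                         // stop at the token's closing bracket
      break;
    else if (!std::isdigit((unsigned char) *p))
      return false;
  return true;
}

int main() {
  std::printf("%d %d %d\n",
              IsNumSketch((char *) "-2]"),   // 1: negative index
              IsNumSketch((char *) "3]"),    // 1: positive index
              IsNumSketch((char *) "x]"));   // 0: not a number
  return 0;
}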

View File

@ -72,7 +72,7 @@ JSNX::JSNX(PGLOBAL g, PJSON row, int type, int len, int prec, my_bool wr)
Jp = NULL;
Nodes = NULL;
Value = AllocateValue(g, type, len, prec);
MulVal = NULL;
//MulVal = NULL;
Jpath = NULL;
Buf_Type = type;
Long = len;
@ -198,38 +198,6 @@ my_bool JSNX::SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm)
return true;
} // endif's
// For calculated arrays, a local Value must be used
switch (jnp->Op) {
case OP_NUM:
jnp->Valp = AllocateValue(g, TYPE_INT);
break;
case OP_ADD:
case OP_MULT:
case OP_SEP:
if (!IsTypeChar(Buf_Type))
jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision());
else
jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2);
break;
case OP_MIN:
case OP_MAX:
jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision());
break;
case OP_CNC:
if (IsTypeChar(Buf_Type))
jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision());
else
jnp->Valp = AllocateValue(g, TYPE_STRING, 512);
break;
default:
break;
} // endswitch Op
if (jnp->Valp)
MulVal = AllocateValue(g, jnp->Valp);
return false;
} // end of SetArrayOptions
@ -312,7 +280,7 @@ my_bool JSNX::ParseJpath(PGLOBAL g)
} // endfor i, p
Nod = i;
MulVal = AllocateValue(g, Value);
//MulVal = AllocateValue(g, Value);
if (trace(1))
for (i = 0; i < Nod; i++)
@ -323,23 +291,6 @@ my_bool JSNX::ParseJpath(PGLOBAL g)
return false;
} // end of ParseJpath
/*********************************************************************************/
/* MakeJson: Serialize the json item and set value to it. */
/*********************************************************************************/
PVAL JSNX::MakeJson(PGLOBAL g, PJSON jsp)
{
if (Value->IsTypeNum()) {
strcpy(g->Message, "Cannot make Json for a numeric value");
Value->Reset();
} else if (jsp->GetType() != TYPE_JAR && jsp->GetType() != TYPE_JOB) {
strcpy(g->Message, "Target is not an array or object");
Value->Reset();
} else
Value->SetValue_psz(Serialize(g, jsp, NULL, 0));
return Value;
} // end of MakeJson
/*********************************************************************************/
/* SetValue: Set a value from a JVALUE contains. */
/*********************************************************************************/
@ -350,6 +301,7 @@ void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val)
if (Jb) {
vp->SetValue_psz(Serialize(g, val->GetJsp(), NULL, 0));
Jb = false;
} else switch (val->GetValType()) {
case TYPE_DTM:
case TYPE_STRG:
@ -394,6 +346,52 @@ void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val)
} // end of SetJsonValue
/*********************************************************************************/
/* MakeJson: Serialize the json item and set value to it. */
/*********************************************************************************/
PJVAL JSNX::MakeJson(PGLOBAL g, PJSON jsp, int n)
{
Jb = false;
if (Value->IsTypeNum()) {
strcpy(g->Message, "Cannot make Json for a numeric value");
return NULL;
} else if (jsp->GetType() != TYPE_JAR && jsp->GetType() != TYPE_JOB) {
strcpy(g->Message, "Target is not an array or object");
return NULL;
} else if (n < Nod -1) {
if (jsp->GetType() == TYPE_JAR) {
int ars = jsp->GetSize(false);
PJNODE jnp = &Nodes[n];
PJAR jarp = new(g) JARRAY;
jnp->Op = OP_EQ;
for (jnp->Rank = 0; jnp->Rank < ars; jnp->Rank++)
jarp->AddArrayValue(g, GetRowValue(g, jsp, n));
jarp->InitArray(g);
jnp->Op = OP_XX;
jnp->Rank = 0;
jsp = jarp;
} else if(jsp->GetType() == TYPE_JOB) {
PJSON jp;
PJOB jobp = new(g) JOBJECT;
for (PJPR prp = ((PJOB)jsp)->GetFirst(); prp; prp = prp->Next) {
jp = (prp->Val->DataType == TYPE_JSON) ? prp->Val->Jsp : prp->Val;
jobp->SetKeyValue(g, GetRowValue(g, jp, n + 1), prp->Key);
} // endfor prp
jsp = jobp;
} // endif Type
} // endif
Jb = true;
return new(g) JVALUE(jsp);
} // end of MakeJson
/*********************************************************************************/
/* GetJson: */
/*********************************************************************************/
@ -437,8 +435,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b)
val = new(g) JVALUE(g, Value);
return val;
} else if (Nodes[i].Op == OP_XX) {
Jb = b;
return new(g)JVALUE(row);
return MakeJson(g, row, i);
} else switch (row->GetType()) {
case TYPE_JOB:
if (!Nodes[i].Key) {
@ -504,6 +501,88 @@ PVAL JSNX::ExpandArray(PGLOBAL g, PJAR arp, int n)
return NULL;
} // end of ExpandArray
/*********************************************************************************/
/* Get the value used for calculating the array. */
/*********************************************************************************/
PVAL JSNX::GetCalcValue(PGLOBAL g, PJAR jap, int n)
{
// For calculated arrays, a local Value must be used
int lng = 0;
short type, prec = 0;
bool b = n < Nod - 1;
PVAL valp;
PJVAL vlp, vp;
OPVAL op = Nodes[n].Op;
switch (op) {
case OP_NUM:
type = TYPE_INT;
break;
case OP_ADD:
case OP_MULT:
if (!IsTypeNum(Buf_Type)) {
type = TYPE_INT;
prec = 0;
for (vlp = jap->GetArrayValue(0); vlp; vlp = vlp->Next) {
vp = (b && vlp->GetJsp()) ? GetRowValue(g, vlp, n + 1) : vlp;
switch (vp->DataType) {
case TYPE_BINT:
if (type == TYPE_INT)
type = TYPE_BIGINT;
break;
case TYPE_DBL:
case TYPE_FLOAT:
type = TYPE_DOUBLE;
prec = MY_MAX(prec, vp->Nd);
break;
default:
break;
} // endswitch Type
} // endfor vlp
} else {
type = Buf_Type;
prec = GetPrecision();
} // endif Buf_Type
break;
case OP_SEP:
if (IsTypeChar(Buf_Type)) {
type = TYPE_DOUBLE;
prec = 2;
} else {
type = Buf_Type;
prec = GetPrecision();
} // endif Buf_Type
break;
case OP_MIN:
case OP_MAX:
type = Buf_Type;
lng = Long;
prec = GetPrecision();
break;
case OP_CNC:
type = TYPE_STRING;
if (IsTypeChar(Buf_Type)) {
lng = (Long) ? Long : 512;
prec = GetPrecision();
} else
lng = 512;
break;
default:
break;
} // endswitch Op
return valp = AllocateValue(g, type, lng, prec);
} // end of GetCalcValue
/*********************************************************************************/
/* CalculateArray: */
/*********************************************************************************/
@ -512,7 +591,8 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
int i, ars = arp->size(), nv = 0;
bool err;
OPVAL op = Nodes[n].Op;
PVAL val[2], vp = Nodes[n].Valp;
PVAL val[2], vp = GetCalcValue(g, arp, n);
PVAL mulval = AllocateValue(g, vp);
PJVAL jvrp, jvp;
JVALUE jval;
@ -545,9 +625,9 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
SetJsonValue(g, vp, jvp);
continue;
} else
SetJsonValue(g, MulVal, jvp);
SetJsonValue(g, mulval, jvp);
if (!MulVal->IsNull()) {
if (!mulval->IsNull()) {
switch (op) {
case OP_CNC:
if (Nodes[n].CncVal) {
@ -555,18 +635,18 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
err = vp->Compute(g, val, 1, op);
} // endif CncVal
val[0] = MulVal;
val[0] = mulval;
err = vp->Compute(g, val, 1, op);
break;
// case OP_NUM:
case OP_SEP:
val[0] = Nodes[n].Valp;
val[1] = MulVal;
val[0] = vp;
val[1] = mulval;
err = vp->Compute(g, val, 2, OP_ADD);
break;
default:
val[0] = Nodes[n].Valp;
val[1] = MulVal;
val[0] = vp;
val[1] = mulval;
err = vp->Compute(g, val, 2, op);
} // endswitch Op
@ -588,9 +668,9 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n)
if (op == OP_SEP) {
// Calculate average
MulVal->SetValue(nv);
mulval->SetValue(nv);
val[0] = vp;
val[1] = MulVal;
val[1] = mulval;
if (vp->Compute(g, val, 2, OP_DIV))
vp->Reset();

View File

@ -44,7 +44,6 @@ typedef struct _jnode {
PSZ Key; // The key used for object
OPVAL Op; // Operator used for this node
PVAL CncVal; // To cont value used for OP_CNC
PVAL Valp; // The internal array VALUE
int Rank; // The rank in array
int Rx; // Read row number
int Nx; // Next to read row number
@ -334,8 +333,9 @@ protected:
my_bool SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm);
PVAL GetColumnValue(PGLOBAL g, PJSON row, int i);
PVAL ExpandArray(PGLOBAL g, PJAR arp, int n);
PVAL GetCalcValue(PGLOBAL g, PJAR bap, int n);
PVAL CalculateArray(PGLOBAL g, PJAR arp, int n);
PVAL MakeJson(PGLOBAL g, PJSON jsp);
PJVAL MakeJson(PGLOBAL g, PJSON jsp, int i);
void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val);
PJSON GetRow(PGLOBAL g);
my_bool CompareValues(PJVAL v1, PJVAL v2);
@ -358,7 +358,7 @@ protected:
JOUTSTR *Jp;
JNODE *Nodes; // The intermediate objects
PVAL Value;
PVAL MulVal; // To value used by multiple column
//PVAL MulVal; // To value used by multiple column
char *Jpath; // The json path
int Buf_Type;
int Long;

View File

@ -322,7 +322,7 @@ JsonGet_String(Json_Make_Array(45,28,36,45,89),'3')
45
SELECT JsonGet_String(Json_Make_Array(45,28,36,45,89),'["+"]') "list",'=' as "egal",JsonGet_String(Json_Make_Array(45,28,36,45,89),'[+]') "sum";
list egal sum
45+28+36+45+89 = 243.00
45+28+36+45+89 = 243
SELECT JsonGet_String(Json_Make_Array(Json_Make_Array(45,28),Json_Make_Array(36,45,89)),'1.0');
JsonGet_String(Json_Make_Array(Json_Make_Array(45,28),Json_Make_Array(36,45,89)),'1.0')
36
@ -349,10 +349,10 @@ Warnings:
Warning 1105
SELECT department, JsonGet_String(Json_Make_Object(department, Json_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department;
department Sumsal
0021 28500.00
0318 72230.00
0319 89800.95
2452 45900.00
0021 28500.000000
0318 72230.000000
0319 89800.950000
2452 45900.000000
SELECT JsonGet_Int(@j1, '4');
JsonGet_Int(@j1, '4')
89

View File

@ -1,6 +1,6 @@
/************* tabbson C++ Program Source Code File (.CPP) *************/
/* PROGRAM NAME: tabbson Version 1.0 */
/* (C) Copyright to the author Olivier BERTRAND 2020 */
/* PROGRAM NAME: tabbson Version 1.1 */
/* (C) Copyright to the author Olivier BERTRAND 2020 - 2021 */
/* This program are the BSON class DB execution routines. */
/***********************************************************************/
@ -158,8 +158,9 @@ BSONDISC::BSONDISC(PGLOBAL g, uint* lg)
bp = NULL;
row = NULL;
sep = NULL;
strfy = NULL;
i = n = bf = ncol = lvl = sz = limit = 0;
all = strfy = false;
all = false;
} // end of BSONDISC constructor
int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
@ -173,7 +174,7 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
sep = GetStringTableOption(g, topt, "Separator", ".");
sz = GetIntegerTableOption(g, topt, "Jsize", 1024);
limit = GetIntegerTableOption(g, topt, "Limit", 10);
strfy = GetBooleanTableOption(g, topt, "Stringify", false);
strfy = GetStringTableOption(g, topt, "Stringify", NULL);
/*********************************************************************/
/* Open the input file. */
@ -186,6 +187,9 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
#endif // ZIP_SUPPORT
tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL);
if (!tdp->Fn && topt->http)
tdp->Fn = GetStringTableOption(g, topt, "Subtype", NULL);
if (!(tdp->Database = SetPath(g, db)))
return 0;
@ -199,7 +203,8 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
if (!tdp->Fn && !tdp->Uri) {
strcpy(g->Message, MSG(MISSING_FNAME));
return 0;
} // endif Fn
} else
topt->subtype = NULL;
if (tdp->Fn) {
// We used the file name relative to recorded datapath
@ -428,7 +433,7 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j)
jcol.Type = TYPE_UNKNOWN;
jcol.Len = jcol.Scale = 0;
jcol.Cbn = true;
} else if (j < lvl) {
} else if (j < lvl && !(strfy && !stricmp(strfy, colname))) {
if (!fmt[bf])
strcat(fmt, colname);
@ -499,7 +504,7 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j)
} // endswitch Type
} else if (lvl >= 0) {
if (strfy) {
if (strfy && !stricmp(strfy, colname)) {
if (!fmt[bf])
strcat(fmt, colname);
@ -731,7 +736,6 @@ void BCUTIL::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp)
case TYPE_FLOAT:
switch (vp->GetType()) {
case TYPE_STRING:
case TYPE_DATE:
case TYPE_DECIM:
vp->SetValue_psz(GetString(jvp));
break;
@ -749,6 +753,16 @@ void BCUTIL::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp)
if (jvp->Type == TYPE_DBL || jvp->Type == TYPE_FLOAT)
vp->SetPrec(jvp->Nd);
break;
case TYPE_DATE:
if (jvp->Type == TYPE_STRG) {
if (!((DTVAL*)vp)->IsFormatted())
((DTVAL*)vp)->SetFormat(g, "YYYY-MM-DDThh:mm:ssZ", 20, 0);
vp->SetValue_psz(GetString(jvp));
} else
vp->SetValue(GetInteger(jvp));
break;
default:
sprintf(G->Message, "Unsupported column type %d", vp->GetType());
@ -881,7 +895,7 @@ PBVAL BCUTIL::GetRowValue(PGLOBAL g, PBVAL row, int i)
} // endfor i
return bvp;
} // end of GetColumnValue
} // end of GetRowValue
/***********************************************************************/
/* GetColumnValue: */

View File

@ -44,10 +44,11 @@ public:
PBPR row;
PBTUT bp;
PCSZ sep;
PCSZ strfy;
char colname[65], fmt[129], buf[16];
uint *length;
int i, n, bf, ncol, lvl, sz, limit;
bool all, strfy;
bool all;
}; // end of BSONDISC
/***********************************************************************/

View File

@ -1,6 +1,6 @@
/************* tabjson C++ Program Source Code File (.CPP) *************/
/* PROGRAM NAME: tabjson Version 1.8 */
/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */
/* (C) Copyright to the author Olivier BERTRAND 2014 - 2021 */
/* This program are the JSON class DB execution routines. */
/***********************************************************************/
#undef BSON_SUPPORT
@ -9,6 +9,8 @@
/* Include relevant sections of the MariaDB header file. */
/***********************************************************************/
#include <my_global.h>
#include <mysqld.h>
#include <sql_error.h>
/***********************************************************************/
/* Include application header files: */
@ -160,22 +162,24 @@ JSONDISC::JSONDISC(PGLOBAL g, uint *lg)
jsp = NULL;
row = NULL;
sep = NULL;
strfy = NULL;
i = n = bf = ncol = lvl = sz = limit = 0;
all = strfy = false;
all = false;
} // end of JSONDISC constructor
int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
{
char filename[_MAX_PATH];
size_t reclg = 0;
bool mgo = (GetTypeID(topt->type) == TAB_MONGO);
PGLOBAL G = NULL;
lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth());
lvl = GetIntegerTableOption(g, topt, "Depth", lvl);
sep = GetStringTableOption(g, topt, "Separator", ".");
sz = GetIntegerTableOption(g, topt, "Jsize", 1024);
strfy = GetStringTableOption(g, topt, "Stringify", NULL);
sz = GetIntegerTableOption(g, topt, "Jsize", 250);
limit = GetIntegerTableOption(g, topt, "Limit", 10);
strfy = GetBooleanTableOption(g, topt, "Stringify", false);
/*********************************************************************/
/* Open the input file. */
@ -187,6 +191,9 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
#endif // ZIP_SUPPORT
tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL);
if (!tdp->Fn && topt->http)
tdp->Fn = GetStringTableOption(g, topt, "Subtype", NULL);
if (!(tdp->Database = SetPath(g, db)))
return 0;
@ -200,7 +207,8 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
if (!tdp->Fn && !tdp->Uri) {
strcpy(g->Message, MSG(MISSING_FNAME));
return 0;
} // endif Fn
} else
topt->subtype = NULL;
if (tdp->Fn) {
// We used the file name relative to recorded datapath
@ -247,11 +255,11 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
jsp = (tjsp->GetDoc()) ? tjsp->GetDoc()->GetArrayValue(0) : NULL;
} else {
if (!((tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0)))) {
if (!mgo) {
if (!mgo && !tdp->Uri) {
sprintf(g->Message, "LRECL must be specified for pretty=%d", tdp->Pretty);
return 0;
} else
tdp->Lrecl = 8192; // Should be enough
} else
tdp->Lrecl = 8192; // Should be enough
} // endif Lrecl
@ -310,7 +318,9 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
case RC_FX:
goto err;
default:
// jsp = tjnp->FindRow(g); // FindRow was done in ReadDB
if (tdp->Pretty != 2)
reclg = strlen(tjnp->To_Line);
jsp = tjnp->Row;
} // endswitch ReadDB
@ -361,7 +371,9 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
case RC_FX:
goto err;
default:
// jsp = tjnp->FindRow(g);
if (tdp->Pretty != 2 && reclg < strlen(tjnp->To_Line))
reclg = strlen(tjnp->To_Line);
jsp = tjnp->Row;
} // endswitch ReadDB
@ -373,8 +385,12 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt)
} // endfor i
if (tdp->Pretty != 2)
if (tdp->Pretty != 2) {
if (!topt->lrecl)
topt->lrecl = reclg + 10;
tjnp->CloseDB(g);
} // endif Pretty
return n;
@ -426,7 +442,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
jcol.Type = TYPE_UNKNOWN;
jcol.Len = jcol.Scale = 0;
jcol.Cbn = true;
} else if (j < lvl) {
} else if (j < lvl && !(strfy && !stricmp(strfy, colname))) {
if (!fmt[bf])
strcat(fmt, colname);
@ -480,9 +496,8 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
strncat(strncat(colname, "_", n), buf, n - 1);
} // endif all
} else {
} else
strncat(fmt, (tdp->Uri ? sep : "[*]"), n);
}
if (Find(g, jar->GetArrayValue(k), "", j))
return true;
@ -497,7 +512,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j)
} // endswitch Type
} else if (lvl >= 0) {
if (strfy) {
if (strfy && !stricmp(strfy, colname)) {
if (!fmt[bf])
strcat(fmt, colname);
@ -1610,7 +1625,7 @@ PSZ JSONCOL::GetJpath(PGLOBAL g, bool proj)
/***********************************************************************/
/* MakeJson: Serialize the json item and set value to it. */
/***********************************************************************/
PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp)
PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp, int n)
{
if (Value->IsTypeNum()) {
strcpy(g->Message, "Cannot make Json for a numeric column");
@ -1621,6 +1636,7 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp)
} // endif Warned
Value->Reset();
return Value;
#if 0
} else if (Value->GetType() == TYPE_BIN) {
if ((unsigned)Value->GetClen() >= sizeof(BSON)) {
@ -1634,12 +1650,65 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp)
Value->SetValue_char(NULL, 0);
} // endif Clen
#endif // 0
} else
Value->SetValue_psz(Serialize(g, jsp, NULL, 0));
} else if (n < Nod - 1) {
if (jsp->GetType() == TYPE_JAR) {
int ars = jsp->GetSize(false);
PJNODE jnp = &Nodes[n];
PJAR jvp = new(g) JARRAY;
for (jnp->Rank = 0; jnp->Rank < ars; jnp->Rank++)
jvp->AddArrayValue(g, GetRowValue(g, jsp, n));
jnp->Rank = 0;
jvp->InitArray(g);
jsp = jvp;
} else if (jsp->Type == TYPE_JOB) {
PJOB jvp = new(g) JOBJECT;
for (PJPR prp = ((PJOB)jsp)->GetFirst(); prp; prp = prp->Next)
jvp->SetKeyValue(g, GetRowValue(g, prp->Val, n + 1), prp->Key);
jsp = jvp;
} // endif Type
} // endif
Value->SetValue_psz(Serialize(g, jsp, NULL, 0));
return Value;
} // end of MakeJson
/***********************************************************************/
/* GetRowValue: */
/***********************************************************************/
PJVAL JSONCOL::GetRowValue(PGLOBAL g, PJSON row, int i)
{
int n = Nod - 1;
PJVAL val = NULL;
for (; i < Nod && row; i++) {
switch (row->GetType()) {
case TYPE_JOB:
val = (Nodes[i].Key) ? ((PJOB)row)->GetKeyValue(Nodes[i].Key) : NULL;
break;
case TYPE_JAR:
val = ((PJAR)row)->GetArrayValue(Nodes[i].Rank);
break;
case TYPE_JVAL:
val = (PJVAL)row;
break;
default:
sprintf(g->Message, "Invalid row JSON type %d", row->GetType());
val = NULL;
} // endswitch Type
if (i < Nod-1)
row = (val) ? val->GetJson() : NULL;
} // endfor i
return val;
} // end of GetRowValue
/***********************************************************************/
/* SetValue: Set a value from a JVALUE contains. */
/***********************************************************************/
@ -1656,7 +1725,6 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL jvp)
case TYPE_DTM:
switch (vp->GetType()) {
case TYPE_STRING:
case TYPE_DATE:
vp->SetValue_psz(jvp->GetString(g));
break;
case TYPE_INT:
@ -1674,7 +1742,17 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL jvp)
vp->SetPrec(jvp->Nd);
break;
default:
case TYPE_DATE:
if (jvp->GetValType() == TYPE_STRG) {
if (!((DTVAL*)vp)->IsFormatted())
((DTVAL*)vp)->SetFormat(g, "YYYY-MM-DDThh:mm:ssZ", 20, 0);
vp->SetValue_psz(jvp->GetString(g));
} else
vp->SetValue(jvp->GetInteger());
break;
default:
sprintf(g->Message, "Unsupported column type %d\n", vp->GetType());
throw 888;
} // endswitch Type
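The new TYPE_DATE branch above feeds JSON strings into a DATE column using the ISO-8601 pattern "YYYY-MM-DDThh:mm:ssZ". A standalone illustration of what that pattern matches, using standard C++ parsing only for clarity (CONNECT itself goes through its own DTVAL formatter):

#include <cstdio>
#include <ctime>
#include <iomanip>
#include <sstream>

int main() {
  std::tm tm{};
  std::istringstream in("2021-03-22T14:05:00Z");
  in >> std::get_time(&tm, "%Y-%m-%dT%H:%M:%SZ");  // same shape as YYYY-MM-DDThh:mm:ssZ
  if (!in.fail())
    std::printf("%04d-%02d-%02d %02d:%02d:%02d\n",
                tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                tm.tm_hour, tm.tm_min, tm.tm_sec);
  return 0;
}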
@ -1740,7 +1818,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i)
Value->SetValue(row->GetType() == TYPE_JAR ? ((PJAR)row)->size() : 1);
return(Value);
} else if (Nodes[i].Op == OP_XX) {
return MakeJson(G, row);
return MakeJson(G, row, i);
} else switch (row->GetType()) {
case TYPE_JOB:
if (!Nodes[i].Key) {

View File

@ -1,7 +1,7 @@
/*************** tabjson H Declares Source Code File (.H) **************/
/* Name: tabjson.h Version 1.3 */
/* */
/* (C) Copyright to the author Olivier BERTRAND 2014 - 2018 */
/* (C) Copyright to the author Olivier BERTRAND 2014 - 2021 */
/* */
/* This file contains the JSON classes declares. */
/***********************************************************************/
@ -67,10 +67,11 @@ public:
PJSON jsp;
PJOB row;
PCSZ sep;
PCSZ strfy;
char colname[65], fmt[129], buf[16];
uint *length;
int i, n, bf, ncol, lvl, sz, limit;
bool all, strfy;
bool all;
}; // end of JSONDISC
/***********************************************************************/
@ -230,8 +231,9 @@ public:
PVAL GetColumnValue(PGLOBAL g, PJSON row, int i);
PVAL ExpandArray(PGLOBAL g, PJAR arp, int n);
PVAL CalculateArray(PGLOBAL g, PJAR arp, int n);
PVAL MakeJson(PGLOBAL g, PJSON jsp);
void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val);
PVAL MakeJson(PGLOBAL g, PJSON jsp, int n);
PJVAL GetRowValue(PGLOBAL g, PJSON row, int i);
void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val);
PJSON GetRow(PGLOBAL g);
// Default constructor not to be used

View File

@ -1,8 +1,7 @@
/************** tabrest C++ Program Source Code File (.CPP) ************/
/* PROGRAM NAME: tabrest Version 1.8 */
/* (C) Copyright to the author Olivier BERTRAND 2018 - 2020 */
/* PROGRAM NAME: tabrest Version 2.0 */
/* (C) Copyright to the author Olivier BERTRAND 2018 - 2021 */
/* This program is the REST Web API support for MariaDB. */
/* When compiled without MARIADB defined, it is the EOM module code. */
/* The way Connect handles NOSQL data returned by REST queries is */
/* just by retrieving it as a file and then leave the existing data */
/* type tables (JSON, XML or CSV) process it as usual. */
@ -11,23 +10,13 @@
/***********************************************************************/
/* Definitions needed by the included files. */
/***********************************************************************/
#if defined(MARIADB)
#include <my_global.h> // All MariaDB stuff
#include <mysqld.h>
#include <sql_error.h>
#else // !MARIADB OEM module
#include "mini-global.h"
#define _MAX_PATH 260
#if !defined(REST_SOURCE)
#if defined(__WIN__) || defined(_WINDOWS)
#include <windows.h>
#else // !__WIN__
#define __stdcall
#include <dlfcn.h> // dlopen(), dlclose(), dlsym() ...
#endif // !__WIN__
#endif // !REST_SOURCE
#define _OS_H_INCLUDED // Prevent os.h to be called
#endif // !MARIADB
#if !defined(__WIN__) && !defined(_WINDOWS)
#include <sys/types.h>
#include <sys/wait.h>
#endif // !__WIN__ && !_WINDOWS
/***********************************************************************/
/* Include application header files: */
@ -53,74 +42,98 @@
#define PUSH_WARNING(M) htrc(M)
#endif
#if defined(__WIN__) || defined(_WINDOWS)
#define popen _popen
#define pclose _pclose
#endif
static XGETREST getRestFnc = NULL;
static int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename);
#if !defined(MARIADB)
/***********************************************************************/
/* DB static variables. */
/***********************************************************************/
int TDB::Tnum;
int DTVAL::Shift;
int CSORT::Limit = 0;
double CSORT::Lg2 = log(2.0);
size_t CSORT::Cpn[1000] = { 0 };
/***********************************************************************/
/* These functions are exported from the REST library. */
/***********************************************************************/
extern "C" {
PTABDEF __stdcall GetREST(PGLOBAL, void*);
PQRYRES __stdcall ColREST(PGLOBAL, PTOS, char*, char*, bool);
} // extern "C"
/***********************************************************************/
/* This function returns a table definition class. */
/***********************************************************************/
PTABDEF __stdcall GetREST(PGLOBAL g, void *memp)
{
return new(g, memp) RESTDEF;
} // end of GetREST
#endif // !MARIADB
/***********************************************************************/
/* Xcurl: retrieve the REST answer by executing cURL. */
/***********************************************************************/
int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename)
{
char buf[1024];
int rc;
FILE *pipe;
char buf[512];
int rc = 0;
if (strchr(filename, '"')) {
strcpy(g->Message, "Invalid file name");
return 1;
} // endif filename
if (Uri) {
if (*Uri == '/' || Http[strlen(Http) - 1] == '/')
sprintf(buf, "curl %s%s -o %s", Http, Uri, filename);
sprintf(buf, "%s%s", Http, Uri);
else
sprintf(buf, "curl %s/%s -o %s", Http, Uri, filename);
sprintf(buf, "%s/%s", Http, Uri);
} else
sprintf(buf, "curl %s -o %s", Http, filename);
strcpy(buf, Http);
if ((pipe = popen(buf, "rt"))) {
if (trace(515))
while (fgets(buf, sizeof(buf), pipe)) {
htrc("%s", buf);
} // endwhile
#if defined(__WIN__)
char cmd[1024];
STARTUPINFO si;
PROCESS_INFORMATION pi;
pclose(pipe);
rc = 0;
sprintf(cmd, "curl \"%s\" -o \"%s\"", buf, filename);
ZeroMemory(&si, sizeof(si));
si.cb = sizeof(si);
ZeroMemory(&pi, sizeof(pi));
// Start the child process.
if (CreateProcess(NULL, cmd, NULL, NULL, FALSE, 0, NULL, NULL, &si, &pi)) {
// Wait until child process exits.
WaitForSingleObject(pi.hProcess, INFINITE);
// Close process and thread handles.
CloseHandle(pi.hProcess);
CloseHandle(pi.hThread);
} else {
sprintf(g->Message, "curl failed, errno =%d", errno);
sprintf(g->Message, "CreateProcess curl failed (%d)", GetLastError());
rc = 1;
} // endif pipe
} // endif CreateProcess
#else // !__WIN__
char fn[600];
pid_t pID;
// Check if the curl package is available by executing a subprocess

FILE *f= popen("command -v curl", "r");
if (!f) {
strcpy(g->Message, "Problem in allocating memory.");
return 1;
} else {
char temp_buff[50];
size_t len = fread(temp_buff,1, 50, f);
if(!len) {
strcpy(g->Message, "Curl not installed.");
return 1;
} else
pclose(f);
} // endif f
pID = vfork();
sprintf(fn, "-o%s", filename);
if (pID == 0) {
// Code executed by child process
execlp("curl", "curl", buf, fn, (char*)NULL);
// If execlp() is successful, we should not reach this next line.
strcpy(g->Message, "Unsuccessful execlp from vfork()");
exit(1);
} else if (pID < 0) {
// failed to fork
strcpy(g->Message, "Failed to fork");
rc = 1;
} else {
// Parent process
wait(NULL); // Wait for the child to terminate
} // endif pID
#endif // !__WIN__
return rc;
} // end od Xcurl
} // end of Xcurl
/***********************************************************************/
/* GetREST: load the Rest lib and get the Rest function. */
@ -130,7 +143,7 @@ XGETREST GetRestFunction(PGLOBAL g)
if (getRestFnc)
return getRestFnc;
#if !defined(MARIADB) || !defined(REST_SOURCE)
#if !defined(REST_SOURCE)
if (trace(515))
htrc("Looking for GetRest library\n");
@ -183,9 +196,9 @@ XGETREST GetRestFunction(PGLOBAL g)
return NULL;
} // endif getdef
#endif // !__WIN__
#else
#else // REST_SOURCE
getRestFnc = restGetFile;
#endif
#endif // REST_SOURCE
return getRestFnc;
} // end of GetRestFunction
@ -193,30 +206,21 @@ XGETREST GetRestFunction(PGLOBAL g)
/***********************************************************************/
/* Return the columns definition to MariaDB. */
/***********************************************************************/
#if defined(MARIADB)
PQRYRES RESTColumns(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
#else // !MARIADB
PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
#endif // !MARIADB
{
PQRYRES qrp= NULL;
char filename[_MAX_PATH + 1]; // MAX PATH ???
int rc;
bool curl = false;
PCSZ http, uri, fn, ftype;
XGETREST grf = GetRestFunction(g);
XGETREST grf = NULL;
bool curl = GetBooleanTableOption(g, tp, "Curl", false);
if (!grf)
if (!curl && !(grf = GetRestFunction(g)))
curl = true;
http = GetStringTableOption(g, tp, "Http", NULL);
uri = GetStringTableOption(g, tp, "Uri", NULL);
#if defined(MARIADB)
ftype = GetStringTableOption(g, tp, "Type", "JSON");
#else // !MARIADB
// OEM tables must specify the file type
ftype = GetStringTableOption(g, tp, "Ftype", "JSON");
#endif // !MARIADB
fn = GetStringTableOption(g, tp, "Filename", NULL);
if (!fn) {
@ -230,28 +234,25 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info)
filename[n + i] = tolower(ftype[i]);
fn = filename;
tp->filename = PlugDup(g, fn);
tp->subtype = PlugDup(g, fn);
sprintf(g->Message, "No file name. Table will use %s", fn);
PUSH_WARNING(g->Message);
} // endif fn
// We used the file name relative to recorded datapath
PlugSetPath(filename, fn, db);
curl = GetBooleanTableOption(g, tp, "Curl", curl);
remove(filename);
// Retrieve the file from the web and copy it locally
if (curl)
rc = Xcurl(g, http, uri, filename);
else if (grf)
else
rc = grf(g->Message, trace(515), http, uri, filename);
else {
strcpy(g->Message, "Cannot access to curl nor casablanca");
rc = 1;
} // endif !grf
if (rc)
if (rc) {
strcpy(g->Message, "Cannot access to curl nor casablanca");
return NULL;
else if (!stricmp(ftype, "JSON"))
} else if (!stricmp(ftype, "JSON"))
qrp = JSONColumns(g, db, NULL, tp, info);
else if (!stricmp(ftype, "CSV"))
qrp = CSVColumns(g, NULL, tp, info);
@ -274,19 +275,15 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
{
char filename[_MAX_PATH + 1];
int rc = 0, n;
bool curl = false, xt = trace(515);
bool xt = trace(515);
LPCSTR ftype;
XGETREST grf = GetRestFunction(g);
XGETREST grf = NULL;
bool curl = GetBoolCatInfo("Curl", false);
if (!grf)
if (!curl && !(grf = GetRestFunction(g)))
curl = true;
#if defined(MARIADB)
ftype = GetStringCatInfo(g, "Type", "JSON");
#else // !MARIADB
// OEM tables must specify the file type
ftype = GetStringCatInfo(g, "Ftype", "JSON");
#endif // !MARIADB
if (xt)
htrc("ftype = %s am = %s\n", ftype, SVP(am));
@ -309,24 +306,21 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff)
// We used the file name relative to recorded datapath
PlugSetPath(filename, Fn, GetPath());
curl = GetBoolCatInfo("Curl", curl);
remove(filename);
// Retrieve the file from the web and copy it locally
if (curl) {
rc = Xcurl(g, Http, Uri, filename);
xtrc(515, "Return from Xcurl: rc=%d\n", rc);
} else if (grf) {
} else {
rc = grf(g->Message, xt, Http, Uri, filename);
xtrc(515, "Return from restGetFile: rc=%d\n", rc);
} else {
strcpy(g->Message, "Cannot access to curl nor casablanca");
rc = 1;
} // endif !grf
} // endelse
if (rc)
return true;
else switch (n) {
if (rc) {
// strcpy(g->Message, "Cannot access to curl nor casablanca");
return true;
} else switch (n) {
case 1: Tdp = new (g) JSONDEF; break;
#if defined(XML_SUPPORT)
case 2: Tdp = new (g) XMLDEF; break;

View File

@ -148,14 +148,21 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info)
/* Open the input file. */
/*********************************************************************/
if (!(fn = GetStringTableOption(g, topt, "Filename", NULL))) {
strcpy(g->Message, MSG(MISSING_FNAME));
return NULL;
} else {
lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth());
lvl = GetIntegerTableOption(g, topt, "Depth", lvl);
lvl = (lvl < 0) ? 0 : (lvl > 16) ? 16 : lvl;
if (topt->http) // REST table can have default filename
fn = GetStringTableOption(g, topt, "Subtype", NULL);
if (!fn) {
strcpy(g->Message, MSG(MISSING_FNAME));
return NULL;
} else
topt->subtype = NULL;
} // endif fn
lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth());
lvl = GetIntegerTableOption(g, topt, "Depth", lvl);
lvl = (lvl < 0) ? 0 : (lvl > 16) ? 16 : lvl;
if (trace(1))
htrc("File %s lvl=%d\n", topt->filename, lvl);

View File

@ -69,7 +69,7 @@ class VALBLK : public BLOCK {
int GetPrec(void) {return Prec;}
void SetCheck(bool b) {Check = b;}
void MoveNull(int i, int j)
{if (To_Nulls) To_Nulls[j] = To_Nulls[j];}
{if (To_Nulls) To_Nulls[j] = To_Nulls[i];}
virtual void SetNull(int n, bool b)
{if (To_Nulls) {To_Nulls[n] = (b) ? '*' : 0;}}
virtual bool IsNull(int n) {return To_Nulls && To_Nulls[n];}

View File

@ -6587,3 +6587,9 @@ dict_table_t::get_overflow_field_local_len() const
/* up to MySQL 5.1: store a 768-byte prefix locally */
return BTR_EXTERN_FIELD_REF_SIZE + DICT_ANTELOPE_MAX_INDEX_COL_LEN;
}
bool dict_table_t::is_stats_table() const
{
return !strcmp(name.m_name, TABLE_STATS_NAME) ||
!strcmp(name.m_name, INDEX_STATS_NAME);
}

View File

@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2014, 2020, MariaDB Corporation.
Copyright (c) 1995, 2021, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2014, 2021, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@ -3777,7 +3777,7 @@ fil_ibd_load(
space = fil_space_get_by_id(space_id);
mutex_exit(&fil_system.mutex);
if (space != NULL) {
if (space) {
/* Compare the filename we are trying to open with the
filename from the first node of the tablespace we opened
previously. Fail if it is different. */
@ -3789,8 +3789,8 @@ fil_ibd_load(
<< "' with space ID " << space->id
<< ". Another data file called " << node->name
<< " exists with the same space ID.";
space = NULL;
return(FIL_LOAD_ID_CHANGED);
space = NULL;
return(FIL_LOAD_ID_CHANGED);
}
return(FIL_LOAD_OK);
}
@ -3827,13 +3827,6 @@ fil_ibd_load(
os_offset_t minimum_size;
case DB_SUCCESS:
if (file.space_id() != space_id) {
ib::info()
<< "Ignoring data file '"
<< file.filepath()
<< "' with space ID " << file.space_id()
<< ", since the redo log references "
<< file.filepath() << " with space ID "
<< space_id << ".";
return(FIL_LOAD_ID_CHANGED);
}
/* Get and test the file size. */

View File

@ -5270,13 +5270,19 @@ ha_innobase::index_type(
{
dict_index_t* index = innobase_get_index(keynr);
if (index && index->type & DICT_FTS) {
return("FULLTEXT");
} else if (dict_index_is_spatial(index)) {
return("SPATIAL");
} else {
return("BTREE");
if (!index) {
return "Corrupted";
}
if (index->type & DICT_FTS) {
return("FULLTEXT");
}
if (dict_index_is_spatial(index)) {
return("SPATIAL");
}
return("BTREE");
}
/****************************************************************//**
@ -18835,6 +18841,33 @@ innodb_log_checksums_update(THD* thd, st_mysql_sys_var*, void* var_ptr,
thd, *static_cast<const my_bool*>(save));
}
#ifdef UNIV_DEBUG
static
void
innobase_debug_sync_callback(srv_slot_t *slot, const void *value)
{
const char *value_str = *static_cast<const char* const*>(value);
size_t len = strlen(value_str) + 1;
// One allocation for list node object and value.
void *buf = ut_malloc_nokey(sizeof(srv_slot_t::debug_sync_t) + len-1);
srv_slot_t::debug_sync_t *sync = new(buf) srv_slot_t::debug_sync_t();
strcpy(sync->str, value_str);
rw_lock_x_lock(&slot->debug_sync_lock);
UT_LIST_ADD_LAST(slot->debug_sync, sync);
rw_lock_x_unlock(&slot->debug_sync_lock);
}
static
void
innobase_debug_sync_set(THD *thd, st_mysql_sys_var*, void *, const void *value)
{
srv_for_each_thread(SRV_WORKER, innobase_debug_sync_callback, value);
srv_for_each_thread(SRV_PURGE, innobase_debug_sync_callback, value);
}
#endif
static SHOW_VAR innodb_status_variables_export[]= {
{"Innodb", (char*) &show_innodb_vars, SHOW_FUNC},
{NullS, NullS, SHOW_LONG}
@ -20399,6 +20432,16 @@ static MYSQL_SYSVAR_BOOL(debug_force_scrubbing,
0,
"Perform extra scrubbing to increase test exposure",
NULL, NULL, FALSE);
char *innobase_debug_sync;
static MYSQL_SYSVAR_STR(debug_sync, innobase_debug_sync,
PLUGIN_VAR_NOCMDARG,
"debug_sync for innodb purge threads. "
"Use it to set up sync points for all purge threads "
"at once. The commands will be applied sequentially at"
" the beginning of purging the next undo record.",
NULL,
innobase_debug_sync_set, NULL);
#endif /* UNIV_DEBUG */
static MYSQL_SYSVAR_BOOL(encrypt_temporary_tables, innodb_encrypt_temporary_tables,
@ -20612,6 +20655,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(background_scrub_data_check_interval),
#ifdef UNIV_DEBUG
MYSQL_SYSVAR(debug_force_scrubbing),
MYSQL_SYSVAR(debug_sync),
#endif
MYSQL_SYSVAR(buf_dump_status_frequency),
MYSQL_SYSVAR(background_thread),
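For orientation, here is a generic sketch (not InnoDB code) of the mechanism the new InnoDB debug_sync variable relies on: the setter appends sync commands to a per-slot list under a lock, and each purge thread drains that list in order at the start of its next unit of work. Names and types here are illustrative stand-ins.

#include <cstdio>
#include <mutex>
#include <string>
#include <vector>

struct SlotSketch {
  std::mutex lock;
  std::vector<std::string> pending;          // stand-in for slot->debug_sync
};

void post_command(SlotSketch &s, const std::string &cmd) {
  std::lock_guard<std::mutex> g(s.lock);     // innobase_debug_sync_set() analogue
  s.pending.push_back(cmd);
}

void purge_step(SlotSketch &s) {
  std::lock_guard<std::mutex> g(s.lock);     // commands applied sequentially
  for (const std::string &cmd : s.pending)   // before the next undo record
    std::printf("DEBUG_SYNC: %s\n", cmd.c_str());
  s.pending.clear();
}

int main() {
  SlotSketch slot;
  post_command(slot, "now SIGNAL purge_reached");
  purge_step(slot);
  return 0;
}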

View File

@ -2880,7 +2880,8 @@ i_s_fts_deleted_generic_fill(
if (!user_table) {
rw_lock_s_unlock(&dict_operation_lock);
DBUG_RETURN(0);
} else if (!dict_table_has_fts_index(user_table)) {
} else if (!dict_table_has_fts_index(user_table)
|| !user_table->is_readable()) {
dict_table_close(user_table, FALSE, FALSE);
rw_lock_s_unlock(&dict_operation_lock);
DBUG_RETURN(0);

View File

@ -2115,6 +2115,11 @@ public:
return true;
return false;
}
/** Check whether the table name is same as mysql/innodb_stats_table
or mysql/innodb_index_stats.
@return true if the table name is same as stats table */
bool is_stats_table() const;
};
inline void dict_index_t::set_modified(mtr_t& mtr) const

View File

@ -1119,6 +1119,16 @@ struct srv_slot_t{
to do */
que_thr_t* thr; /*!< suspended query thread
(only used for user threads) */
#ifdef UNIV_DEBUG
struct debug_sync_t {
UT_LIST_NODE_T(debug_sync_t)
debug_sync_list;
char str[1];
};
UT_LIST_BASE_NODE_T(debug_sync_t)
debug_sync;
rw_lock_t debug_sync_lock;
#endif
};
#ifdef UNIV_DEBUG

View File

@ -1119,6 +1119,10 @@ public:
ut_ad(old_n_ref > 0);
}
/** @return whether the table has lock on
mysql.innodb_table_stats and mysql.innodb_index_stats */
bool has_stats_table_lock() const;
/** Free the memory to trx_pools */
void free();

View File

@ -192,28 +192,33 @@ wsrep_is_BF_lock_timeout(
const trx_t* trx,
bool locked = true)
{
if (trx->is_wsrep() && wsrep_thd_is_BF(trx->mysql_thd, FALSE)
&& trx->error_state != DB_DEADLOCK) {
ib::info() << "WSREP: BF lock wait long for trx:" << ib::hex(trx->id)
bool long_wait= (trx->error_state != DB_DEADLOCK &&
trx->is_wsrep() &&
wsrep_thd_is_BF(trx->mysql_thd, false));
bool was_wait= true;
DBUG_EXECUTE_IF("wsrep_instrument_BF_lock_wait",
was_wait=false; long_wait=true;);
if (long_wait) {
ib::info() << "WSREP: BF lock wait long for trx:" << trx->id
<< " query: " << wsrep_thd_query(trx->mysql_thd);
if (!locked) {
if (!locked)
lock_mutex_enter();
}
ut_ad(lock_mutex_own());
trx_print_latched(stderr, trx, 3000);
/* Note this will release lock_sys mutex */
lock_print_info_all_transactions(stderr);
if (!locked) {
lock_mutex_exit();
}
if (locked)
lock_mutex_enter();
srv_print_innodb_monitor = TRUE;
srv_print_innodb_lock_monitor = TRUE;
os_event_set(srv_monitor_event);
return true;
}
return false;
return was_wait;
} else
return false;
}
#endif /* WITH_WSREP */

View File

@ -46,6 +46,7 @@ Created 3/14/1997 Heikki Tuuri
#include "handler.h"
#include "ha_innodb.h"
#include "fil0fil.h"
#include "debug_sync.h"
/*************************************************************************
IMPORTANT NOTE: Any operation that generates redo MUST check that there
@ -1306,6 +1307,25 @@ row_purge_step(
node->start();
#ifdef UNIV_DEBUG
srv_slot_t *slot = thr->thread_slot;
ut_ad(slot);
rw_lock_x_lock(&slot->debug_sync_lock);
while (UT_LIST_GET_LEN(slot->debug_sync)) {
srv_slot_t::debug_sync_t *sync =
UT_LIST_GET_FIRST(slot->debug_sync);
bool result = debug_sync_set_action(current_thd,
sync->str,
strlen(sync->str));
ut_a(!result);
UT_LIST_REMOVE(slot->debug_sync, sync);
ut_free(sync);
}
rw_lock_x_unlock(&slot->debug_sync_lock);
#endif
if (!(node->undo_recs == NULL || ib_vector_is_empty(node->undo_recs))) {
trx_purge_rec_t*purge_rec;

View File

@ -466,6 +466,7 @@ row_vers_build_clust_v_col(
vcol_info->set_used();
maria_table = vcol_info->table();
}
DEBUG_SYNC(current_thd, "ib_clust_v_col_before_row_allocated");
ib_vcol_row vc(NULL);
byte *record = vc.record(thd, index, &maria_table);

View File

@ -2518,6 +2518,13 @@ DECLARE_THREAD(srv_worker_thread)(
slot = srv_reserve_slot(SRV_WORKER);
#ifdef UNIV_DEBUG
UT_LIST_INIT(slot->debug_sync,
&srv_slot_t::debug_sync_t::debug_sync_list);
rw_lock_create(PFS_NOT_INSTRUMENTED, &slot->debug_sync_lock,
SYNC_NO_ORDER_CHECK);
#endif
ut_a(srv_n_purge_threads > 1);
ut_a(ulong(my_atomic_loadlint(&srv_sys.n_threads_active[SRV_WORKER]))
< srv_n_purge_threads);
@ -2539,6 +2546,8 @@ DECLARE_THREAD(srv_worker_thread)(
}
} while (purge_sys.enabled());
ut_d(rw_lock_free(&slot->debug_sync_lock));
srv_free_slot(slot);
ut_ad(!purge_sys.enabled());
@ -2739,6 +2748,12 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
slot = srv_reserve_slot(SRV_PURGE);
#ifdef UNIV_DEBUG
UT_LIST_INIT(slot->debug_sync,
&srv_slot_t::debug_sync_t::debug_sync_list);
rw_lock_create(PFS_NOT_INSTRUMENTED, &slot->debug_sync_lock,
SYNC_NO_ORDER_CHECK);
#endif
ulint rseg_history_len = trx_sys.history_size();
do {
@ -2767,6 +2782,8 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
shutdown state. */
ut_a(srv_get_task_queue_length() == 0);
ut_d(rw_lock_free(&slot->debug_sync_lock));
srv_free_slot(slot);
/* Note that we are shutting down. */

View File

@ -805,7 +805,8 @@ void trx_rollback_recovered(bool all)
srv_fast_shutdown)
goto discard;
if (all || trx_get_dict_operation(trx) != TRX_DICT_OP_NONE)
if (all || trx_get_dict_operation(trx) != TRX_DICT_OP_NONE
|| trx->has_stats_table_lock())
{
trx_rollback_active(trx);
if (trx->error_state != DB_SUCCESS)

View File

@ -2404,3 +2404,16 @@ trx_set_rw_mode(
trx->read_view.set_creator_trx_id(trx->id);
}
}
bool trx_t::has_stats_table_lock() const
{
for (lock_list::const_iterator it= lock.table_locks.begin(),
end= lock.table_locks.end(); it != end; ++it)
{
const lock_t *lock= *it;
if (lock && lock->un_member.tab_lock.table->is_stats_table())
return true;
}
return false;
}

View File

@ -17,7 +17,7 @@
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
cmake_minimum_required(VERSION 2.6)
cmake_minimum_required(VERSION 2.8.12)
project(mroonga)
if("${CMAKE_SOURCE_DIR}" STREQUAL "${CMAKE_CURRENT_SOURCE_DIR}")

View File

@ -949,7 +949,8 @@ void ha_myisam::setup_vcols_for_repair(HA_CHECK *param)
return;
file->s->vreclength= new_vreclength;
}
DBUG_ASSERT(file->s->base.reclength < file->s->vreclength);
DBUG_ASSERT(file->s->base.reclength < file->s->vreclength ||
!table->s->stored_fields);
param->fix_record= compute_vcols;
table->use_all_columns();
table->vcol_set= &table->s->all_set;

View File

@ -20,8 +20,7 @@
# along with this program; if not, write to the Free Software Foundation,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}
${CMAKE_SOURCE_DIR}/include
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
${CMAKE_SOURCE_DIR}/sql
${CMAKE_BINARY_DIR}/sql
${PCRE_INCLUDES}