#
# test of safe selects
#
--disable_warnings
drop table if exists t1;
--enable_warnings
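# SQL_SAFE_UPDATES=1 rejects UPDATE and DELETE statements that neither
# use a key in the WHERE clause nor specify a LIMIT. SQL_SELECT_LIMIT
# caps the number of rows a SELECT returns, and MAX_JOIN_SIZE rejects
# statements that are estimated to examine more rows than its value.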
SET SQL_SAFE_UPDATES=1,SQL_SELECT_LIMIT=4, MAX_JOIN_SIZE=8;
create table t1 (a int auto_increment primary key, b char(20)) charset utf8mb4;
insert into t1 values(1,"test");
SELECT SQL_BUFFER_RESULT * from t1;
update t1 set b="a" where a=1;
delete from t1 where a=1;
insert into t1 values(1,"test"),(2,"test2");
SELECT SQL_BUFFER_RESULT * from t1;
update t1 set b="a" where a=1;
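# t1 has only two rows at this point, so the estimated number of row
# accesses for this three-way self-join stays within MAX_JOIN_SIZE=8.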
select 1 from t1,t1 as t2,t1 as t3;
# The following should give errors:
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
update t1 set b="a";
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
update t1 set b="a" where b="test";
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
delete from t1;
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
delete from t1 where b="test";
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
delete from t1 where a+0=1;
# Reads t1 (two rows) five times to build the hash tables. Then reads
# one row from the outermost t1, to produce 32 rows, which hits the
# sql_select_limit of 4. Total number of rows read: 2 * 5 + 1 = 11.
--error ER_TOO_BIG_SELECT
select 1 from t1,t1 as t2,t1 as t3,t1 as t4,t1 as t5, t1 as t6;
--error ER_TOO_BIG_SELECT
select /*+ SET_VAR(max_join_size = 10) */
1 from t1,t1 as t2,t1 as t3,t1 as t4,t1 as t5, t1 as t6;
# The following should be ok:
--skip_if_hypergraph # Different plan hits max_join_size. Bug#33664699.
select /*+ SET_VAR(max_join_size = 11) */
1 from t1,t1 as t2,t1 as t3,t1 as t4,t1 as t5, t1 as t6;
update t1 set b="a" limit 1;
update t1 set b="a" where b="b" limit 2;
delete from t1 where b="test" limit 1;
delete from t1 where a+0=1 limit 2;
# Test SQL_BIG_SELECTS
alter table t1 add key b (b);
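# Setting MAX_JOIN_SIZE to a non-default value implicitly switches
# SQL_BIG_SELECTS off; the SELECT below verifies both settings.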
SET MAX_JOIN_SIZE=1;
SELECT @@MAX_JOIN_SIZE, @@SQL_BIG_SELECTS;
insert into t1 values (null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a");
--error ER_TOO_BIG_SELECT
SELECT * from t1 order by a;
SET SQL_BIG_SELECTS=1;
SELECT * from t1 order by a;
SET MAX_JOIN_SIZE=1;
--error ER_TOO_BIG_SELECT
SELECT * from t1;
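# Resetting MAX_JOIN_SIZE to DEFAULT lifts the restriction, so the
# full scan is allowed again.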
SET MAX_JOIN_SIZE=DEFAULT;
SELECT * from t1 order by a;
#
# Test MAX_SEEKS_FOR_KEY
#
analyze table t1;
insert into t1 values (null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a"),(null,"a");
ANALYZE TABLE t1;
explain select STRAIGHT_JOIN * from t1,t1 as t2 where t1.b=t2.b;
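# MAX_SEEKS_FOR_KEY caps the optimizer's estimate of the number of
# seeks needed for a key lookup. With the cap at 1, index lookups look
# cheap, so the second EXPLAIN is expected to prefer the key on b.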
set MAX_SEEKS_FOR_KEY=1;
explain select STRAIGHT_JOIN * from t1,t1 as t2 where t1.b=t2.b;
SET MAX_SEEKS_FOR_KEY=DEFAULT;
drop table t1;
# BUG#8726
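# The queries below verify that max_join_size is also enforced for
# derived tables.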
create table t1 (a int);
insert into t1 values (1),(2),(3),(4),(5);
insert into t1 select * from t1;
insert into t1 select * from t1;
insert into t1 select * from t1;
analyze table t1;
# No error, because SQL_SELECT_LIMIT=4 means at most 4 rows need to be
# read, which is within max_join_size=4.
set local max_join_size=4;
select * from (select * from t1) x;
set max_join_size=3;
--error ER_TOO_BIG_SELECT
select * from (select * from t1) x;
set local max_join_size=1;
--error ER_TOO_BIG_SELECT
select * from (select a.a as aa, b.a as ba from t1 a, t1 b) x;
set local max_join_size=1;
# Does not access any base tables, so no error is expected.
--sorted_result
select * from (select 1 union select 2 union select 3) x;
drop table t1;
SET SQL_SAFE_UPDATES=0,SQL_SELECT_LIMIT=DEFAULT, MAX_JOIN_SIZE=DEFAULT;
# End of 4.1 tests
--echo #
--echo # Bug #28145710: SQL_SAFE_UPDATES ERROR IS INSUFFICIENTLY INFORMATIVE
--echo #
CREATE TABLE t1 (c1 INT NOT NULL, c2 VARCHAR(200) NOT NULL,
UNIQUE KEY idx1 (c1), UNIQUE KEY idx2 (c2));
CREATE TABLE t2 (c1 INT NOT NULL, c2 VARCHAR(200) NOT NULL,
UNIQUE KEY idx1 (c1));
INSERT INTO t1 VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd');
INSERT INTO t2 VALUES (11, 'a'), (12, 'b'), (3, 'c'), (14, 'd');
ANALYZE TABLE t1, t2;
SET SESSION sql_safe_updates=1;
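# A 1-byte cap makes the range optimizer run out of memory and fall
# back to a full scan (with a warning), so safe mode has to reject the
# first two statements below.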
SET RANGE_OPTIMIZER_MAX_MEM_SIZE= 1;
# DELETE with range_optimizer_max_mem_size warning
let query1= DELETE FROM t1 WHERE c1 IN (1,22);
# UPDATE with range_optimizer_max_mem_size warning
let query2= UPDATE t1 SET c1=20 WHERE c1 IN (1,22);
# Multi-table DELETE with conversion warning, where the target table uses a table scan
let query3= DELETE t1 FROM t1 JOIN t2 ON t1.c2 = t2.c1;
# Multi-table UPDATE with conversion warning, where the target table uses a table scan
let query4= UPDATE t1, t2 SET t1.c1=20 WHERE t1.c2 = t2.c1;
# Multi-table DELETE where the target table uses eq_ref (no error)
let query5= DELETE t2 FROM t1 JOIN t2 ON t1.c2 = t2.c1;
# DELETE with conversion warning
let query6= DELETE FROM t1 WHERE c2 IN(1,2222);
# UPDATE with conversion warning
let query7= UPDATE t1 SET c1=20 WHERE c2 IN(1,2222);
# DELETE on a non-indexed column returns an error
let query8= DELETE FROM t2 WHERE c2 IN('a','e');
# DELETE of the full table; also tested with the binary log disabled
let query9= DELETE FROM t2;
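# Each statement is first EXPLAINed, then executed. All are expected to
# fail with the more informative safe-update error, except query5,
# whose target table is accessed through eq_ref.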
eval EXPLAIN $query1;
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
eval $query1;
eval EXPLAIN $query2;
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
eval $query2;
SET RANGE_OPTIMIZER_MAX_MEM_SIZE= default;
eval EXPLAIN $query3;
--skip_if_hypergraph # Different wording in error message.
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
eval $query3;
eval EXPLAIN $query4;
--skip_if_hypergraph # Different wording in error message.
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
eval $query4;
eval EXPLAIN $query5;
--skip_if_hypergraph # Does not use index. Bug#33583337?
eval $query5;
eval EXPLAIN $query6;
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
eval $query6;
eval EXPLAIN $query7;
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
eval $query7;
eval EXPLAIN $query8;
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
eval $query8;
--replace_column 12 #
eval EXPLAIN $query9;
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
eval $query9;
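# The same error must be raised with binary logging disabled.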
SET sql_log_bin= 0;
--error ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
eval $query9;
SET sql_log_bin= default;
DROP TABLE t1, t2;
SET SESSION sql_safe_updates=default;
--echo #
--echo # Bug#25118903: MAX_JOIN_SIZE DOES NOT MATCH ROW ESTIMATES
--echo #
# Create a table with 100 rows.
CREATE TABLE t1(id INT PRIMARY KEY, x INT);
INSERT INTO t1
WITH RECURSIVE qn AS (SELECT 1 AS n UNION ALL SELECT 1+n FROM qn WHERE n < 100)
SELECT n, n FROM qn;
# Create a table with 10 rows.
CREATE TABLE t2(id INT PRIMARY KEY, x INT);
INSERT INTO t2 SELECT * FROM t1 WHERE id <= 10;
ANALYZE TABLE t1, t2;
# Hash join reads each table once, so 100 + 10 row accesses.
SELECT /*+ SET_VAR(max_join_size = 110) */
COUNT(*) FROM t1, t2 WHERE t1.x = t2.x;
--error ER_TOO_BIG_SELECT
SELECT /*+ SET_VAR(max_join_size = 109) */
COUNT(*) FROM t1, t2 WHERE t1.x = t2.x;
# Nested loop join reads the outer table once and the inner table once
# per row in the outer table. Here the inner table is accessed through
# an index lookup on t1.id, touching only one row per outer row, so
# expect 10 row accesses in t2 + 10 * 1 row accesses in t1.
SELECT /*+ SET_VAR(max_join_size = 20) */
COUNT(*) FROM t1, t2 WHERE t1.id = t2.x;
--error ER_TOO_BIG_SELECT
SELECT /*+ SET_VAR(max_join_size = 19) */
COUNT(*) FROM t1, t2 WHERE t1.id = t2.x;
# UNION needs to read each side once, so there are 100 + 10 row accesses.
SELECT /*+ SET_VAR(max_join_size = 110) */ COUNT(x) FROM t1
UNION ALL SELECT COUNT(x) FROM t2;
SELECT /*+ SET_VAR(max_join_size = 110) */ COUNT(x) FROM t1
UNION SELECT COUNT(x) FROM t2;
--error ER_TOO_BIG_SELECT
SELECT /*+ SET_VAR(max_join_size = 109) */ COUNT(x) FROM t1
UNION ALL SELECT COUNT(x) FROM t2;
--error ER_TOO_BIG_SELECT
SELECT /*+ SET_VAR(max_join_size = 109) */ COUNT(x) FROM t1
UNION SELECT COUNT(x) FROM t2;
# The hash join reads 100 rows from t1 and 10 rows from t2.
# Additionally, 10 rows will be read from t3 for each row returned
# from the join. Since the estimated number of rows returned from the
# join is 100 (which is 10x higher than the actual number), the
# estimated number of row accesses is 100 + 10 + 100 * 10 = 1110.
# (Actually, it's just above 1110 because of inexactness in
# floating-point calculations, so the max_join_size must be 1111 to
# let the query through.)
SELECT /*+ SET_VAR(max_join_size = 1111) */ COUNT(*) FROM t1, t2
WHERE t1.x=t2.x AND (SELECT MAX(t1.id+t2.id+t3.id) FROM t2 AS t3);
--error ER_TOO_BIG_SELECT
SELECT /*+ SET_VAR(max_join_size = 1109) */ COUNT(*) FROM t1, t2
WHERE t1.x=t2.x AND (SELECT MAX(t1.id+t2.id+t3.id) FROM t2 AS t3);
# Variant of the previous query, with the subquery in the join
# condition instead of the WHERE clause. Same estimated number of
# accesses (1110).
SET max_join_size = 1111;
--disable_query_log
if (`SELECT NOT FIND_IN_SET('hypergraph_optimizer=on', @@optimizer_switch)`) {
# The old optimizer estimates a "filter_effect" of 1 for the join
# condition, and therefore expects a lot more invocations of the
# subquery, getting a total of 10110 estimated row accesses.
SET max_join_size = 10111;
}
--enable_query_log
SELECT COUNT(*) FROM t1 LEFT JOIN t2
ON t1.x=t2.x AND (SELECT MAX(t1.id+t2.id+t3.id) FROM t2 AS t3);
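# Two below the passing value, since the actual estimate is slightly
# above the round number (see the note on floating-point inexactness
# above).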
SET max_join_size = @@max_join_size - 2;
--error ER_TOO_BIG_SELECT
SELECT COUNT(*) FROM t1 LEFT JOIN t2
ON t1.x=t2.x AND (SELECT MAX(t1.id+t2.id+t3.id) FROM t2 AS t3);
SET max_join_size = DEFAULT;
# Test with an uncorrelated subquery which only needs to be executed
# once. The hash join reads 100 + 10 rows. The subquery is executed
# once and reads 10 rows from t2. Total: 100 + 10 + 10 = 120 rows.
SET max_join_size = 120;
--disable_query_log
if (`SELECT NOT FIND_IN_SET('hypergraph_optimizer=on', @@optimizer_switch)`) {
# The old optimizer executes the uncorrelated subquery during
# optimization, and therefore doesn't count the 10 rows read by it.
# Adjust for it.
SET max_join_size = @@max_join_size - 10;
}
--enable_query_log
SELECT COUNT(*) FROM t1, t2
WHERE t1.x=t2.x AND (SELECT DISTINCT t3.x>0 FROM t2 AS t3);
SET max_join_size = @@max_join_size - 1;
--error ER_TOO_BIG_SELECT
SELECT COUNT(*) FROM t1, t2
WHERE t1.x=t2.x AND (SELECT DISTINCT t3.x>0 FROM t2 AS t3);
SET max_join_size = DEFAULT;
# Each derived table is materialized once, so there's one full scan of
# each base table, giving 100 + 10 = 110 rows.
SELECT /*+ SET_VAR(max_join_size=110) */ COUNT(*) FROM
(SELECT DISTINCT x FROM t1) AS dt1,
(SELECT DISTINCT x FROM t2) AS dt2 WHERE dt1.x=dt2.x;
--error ER_TOO_BIG_SELECT
SELECT /*+ SET_VAR(max_join_size=109) */ COUNT(*) FROM
(SELECT DISTINCT x FROM t1) AS dt1,
(SELECT DISTINCT x FROM t2) AS dt2 WHERE dt1.x=dt2.x;
DROP TABLE t1, t2;
--echo # Test that LIMIT is taken into account.
CREATE TABLE t(id INT PRIMARY KEY AUTO_INCREMENT, x INT, y INT, KEY (x));
INSERT INTO t(x, y) VALUES (1, 1), (2, 2), (3, 3), (4, 4), (5, 5),
(6, 6), (7, 7), (8, 8), (9, 9), (10, 10);
INSERT INTO t(x, y) SELECT x, y FROM t;
INSERT INTO t(x, y) SELECT x, y FROM t;
INSERT INTO t(x, y) SELECT x, y FROM t;
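# t now has 10 * 2 * 2 * 2 = 80 rows.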
ANALYZE TABLE t;
# These queries need to read 10 rows from the base table or index.
SET max_join_size = 9;
--error ER_TOO_BIG_SELECT
SELECT 1 FROM t LIMIT 10;
--error ER_TOO_BIG_SELECT
SELECT x FROM t ORDER BY x LIMIT 10;
--error ER_TOO_BIG_SELECT
SELECT * FROM t ORDER BY id LIMIT 10;
SET max_join_size = 10;
SELECT 1 FROM t LIMIT 10;
SELECT x FROM t ORDER BY x LIMIT 10;
SELECT * FROM t ORDER BY id LIMIT 10;
# Testing a quirk of the old optimizer. Given this query:
#
# SELECT 1 FROM t WHERE y = 3 ORDER BY x LIMIT 3;
#
# The old optimizer chooses the following plan:
#
# -> Limit: 3 row(s) (cost=0.28 rows=0.3)
# -> Filter: (t.y = 3) (cost=0.28 rows=0.3)
# -> Index scan on t using x (cost=0.28 rows=3)
#
# In the row estimate for the index scan, it has ignored the effect of
# the filter on the limit. All the row estimates in this plan should
# have been ten times higher (given the heuristic which estimates 10%
# selectivity on the equality predicate).
#
# max_join_size adjusts for this by taking the row estimate from
# another source for index scans with the old optimizer, and expects
# to read 30 rows.
SET max_join_size = 29;
if (`SELECT FIND_IN_SET('hypergraph_optimizer=on', @@optimizer_switch)`) {
# The hypergraph optimizer chooses a different plan (table scan +
# sort) and estimates a higher number of rows to read.
--disable_query_log
SET max_join_size = 79;
--enable_query_log
}
--error ER_TOO_BIG_SELECT
SELECT 1 FROM t WHERE y = 3 ORDER BY x LIMIT 3;
SET max_join_size = @@max_join_size + 1;
SELECT 1 FROM t WHERE y = 3 ORDER BY x LIMIT 3;
# Ordering on a non-indexed column requires reading the entire table.
--error ER_TOO_BIG_SELECT
SELECT /*+ SET_VAR(max_join_size = 79) */ y FROM t ORDER BY y LIMIT 10;
SELECT /*+ SET_VAR(max_join_size = 80) */ y FROM t ORDER BY y LIMIT 10;
# Assume that aggregation needs to read the entire table (which might
# be too pessimistic for streaming aggregation with explicit
# grouping).
SET max_join_size = 79;
--error ER_TOO_BIG_SELECT
SELECT SUM(y) FROM t LIMIT 10;
--error ER_TOO_BIG_SELECT
SELECT SUM(y) FROM t GROUP BY x LIMIT 10;
--error ER_TOO_BIG_SELECT
SELECT SUM(x) FROM t GROUP BY y LIMIT 10;
SET max_join_size = 80;
SELECT SUM(y) FROM t LIMIT 10;
--sorted_result
SELECT SUM(y) FROM t GROUP BY x LIMIT 10;
--sorted_result
SELECT SUM(x) FROM t GROUP BY y LIMIT 10;
# This should read 3 + 4 rows from the base tables.
SET max_join_size = 6;
--error ER_TOO_BIG_SELECT
(SELECT 1 FROM t LIMIT 3) UNION DISTINCT (SELECT 1 FROM t LIMIT 4);
--error ER_TOO_BIG_SELECT
(SELECT 1 FROM t LIMIT 3) UNION DISTINCT (SELECT 1 FROM t LIMIT 4) LIMIT 1;
--error ER_TOO_BIG_SELECT
(SELECT 1 FROM t LIMIT 3) UNION ALL (SELECT 1 FROM t LIMIT 4);
--error ER_TOO_BIG_SELECT
(SELECT 1 FROM t LIMIT 3) UNION ALL (SELECT 1 FROM t LIMIT 4) LIMIT 10;
SET max_join_size = 7;
(SELECT 1 FROM t LIMIT 3) UNION DISTINCT (SELECT 1 FROM t LIMIT 4);
(SELECT 1 FROM t LIMIT 3) UNION DISTINCT (SELECT 1 FROM t LIMIT 4) LIMIT 1;
(SELECT 1 FROM t LIMIT 3) UNION ALL (SELECT 1 FROM t LIMIT 4);
(SELECT 1 FROM t LIMIT 3) UNION ALL (SELECT 1 FROM t LIMIT 4) LIMIT 10;
# With UNION ALL, a LIMIT on the top level may further restrict the
# number of rows needed: 3 rows from the first branch plus 2 from the
# second are enough to reach the limit of 5.
SET max_join_size = 4;
--error ER_TOO_BIG_SELECT
(SELECT 1 FROM t LIMIT 3) UNION ALL (SELECT 1 FROM t LIMIT 4) LIMIT 5;
SET max_join_size = 5;
(SELECT 1 FROM t LIMIT 3) UNION ALL (SELECT 1 FROM t LIMIT 4) LIMIT 5;
# Nested loop join. With a join selectivity of 10%, we expect to have
# to read two rows from the outer table before we reach the limit.
# (Because the join returns 80 * 80 * 0.1 = 640 rows. The outer table
# has 80 rows. Since the join returns 8 rows per row in the outer
# table, we expect it's enough to read 2 rows to reach the limit of
# 16.) For each of those 2 rows, we do an index lookup with 10%
# selectivity (8 rows) on the inner table. Total estimated number of
# row accesses: 2 + 2 * 8 = 18.
SET max_join_size = 17;
--error ER_TOO_BIG_SELECT
SELECT 1 FROM t AS t1, t AS t2 WHERE t1.x = t2.x LIMIT 16;
SET max_join_size = @@max_join_size + 1;
SELECT 1 FROM t AS t1, t AS t2 WHERE t1.x = t2.x LIMIT 16;
# Hash join. We have to read the entire build table once. As in the
# nested loop case above, we expect it's enough to read two rows from
# the probe table to reach the limit of 16. Total: 80 + 2 = 82 rows.
SET max_join_size = 81;
--error ER_TOO_BIG_SELECT
SELECT 1 FROM t AS t1, t AS t2 WHERE t1.y = t2.y LIMIT 16;
SET max_join_size = @@max_join_size + 1;
SELECT 1 FROM t AS t1, t AS t2 WHERE t1.y = t2.y LIMIT 16;
SET max_join_size = DEFAULT;
DROP TABLE t;