This is an automated email from the ASF dual-hosted git repository. dataroaring pushed a commit to branch compute_group1 in repository https://gitbox.apache.org/repos/asf/doris.git
commit fecb4e07858cf063b0d77a0a4ea004ceb1fca3d7 Author: Yongqiang YANG <dataroar...@gmail.com> AuthorDate: Fri Sep 13 02:12:12 2024 +0800 fix --- .../test_grant_revoke_compute_group_to_user.groovy | 292 +++++++++++++++++++++ .../cluster/test_warm_up_compute_group.groovy | 265 +++++++++++++++++++ 2 files changed, 557 insertions(+) diff --git a/regression-test/suites/cloud_p0/auth/test_grant_revoke_compute_group_to_user.groovy b/regression-test/suites/cloud_p0/auth/test_grant_revoke_compute_group_to_user.groovy new file mode 100644 index 00000000000..1499681caab --- /dev/null +++ b/regression-test/suites/cloud_p0/auth/test_grant_revoke_compute_group_to_user.groovy @@ -0,0 +1,292 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +suite("test_grant_revoke_compute_group_to_user", "cloud_auth") { + if (!isCloudMode()) { + log.info("not cloud mode just return") + return + } + def role = "admin" + def user1 = "regression_test_cloud_user1" + def user2 = "regression_test_cloud_user2" + def user3 = "regression_test_cloud_user3" + def tbl = "test_auth_tbl" + + sql """drop user if exists ${user1}""" + sql """drop user if exists ${user2}""" + sql """drop user if exists ${user3}""" + sql """drop table if exists ${tbl}""" + + def getCluster = { group -> + def result = sql " SHOW COMPUTE GROUPS; " + for (int i = 0; i < result.size(); i++) { + if (result[i][0] == group) { + return result[i] + } + } + return null + } + + def commonAuth = { result, UserIdentity, Password, Roles, GlobalPrivs -> + assertEquals(UserIdentity as String, result.UserIdentity[0] as String) + assertEquals(Password as String, result.Password[0] as String) + assertEquals(Roles as String, result.Roles[0] as String) + assertEquals(GlobalPrivs as String, result.GlobalPrivs[0] as String) + } + + def getProperty = { property, user -> + def result = null + if (user == "") { + result = sql_return_maparray """SHOW PROPERTY""" + } else { + result = sql_return_maparray """SHOW PROPERTY FOR '${user}'""" + } + result.find { + it.Key == property as String + } + } + + def groups = sql " SHOW COMPUTE GROUPS; " + assertTrue(!groups.isEmpty()) + def validCluster = groups[0][0] + + // 1. 
change user + // ${user1} admin role + sql """create user ${user1} identified by 'Cloud12345' default role 'admin'""" + result = sql_return_maparray """show grants for '${user1}'""" + commonAuth result, "'${user1}'@'%'" as String, "Yes", "admin", "Admin_priv" + assertNull(result.ComputeGroupPrivs[0]) + + // ${user2} not admin role + sql """create user ${user2} identified by 'Cloud12345'""" + sql """GRANT USAGE_PRIV ON COMPUTE GROUP '${validCluster}' TO '${user2}'""" + // for use default_group:regression_test + sql """grant select_priv on *.*.* to ${user2}""" + + + sql """ + CREATE TABLE ${tbl} ( + `k1` int(11) NULL, + `k2` char(5) NULL + ) + DUPLICATE KEY(`k1`, `k2`) + COMMENT 'OLAP' + DISTRIBUTED BY HASH(`k1`) BUCKETS 1 + PROPERTIES ( + "replication_num"="1" + ); + """ + + sql """ + insert into ${tbl} (k1, k2) values (1, "10"); + """ + + sql """create user ${user3} identified by 'Cloud12345'""" + sql """GRANT SELECT_PRIV ON *.*.* TO '${user3}'@'%'""" + result = connect(user = "${user3}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """SHOW COMPUTE GROUPS""" + } + // not grant any group to user3 + assertTrue(result.isEmpty()) + def db = context.dbName + + connect(user = "${user3}", password = 'Cloud12345', url = context.config.jdbcUrl) { + test { + sql """select * from ${db}.${tbl}""" + exception "or you may not have permission to access the current group" + } + } + + // 2. 
grant group + def group1 = "groupA" + def result + + sql "sync" + + // admin role user can grant group to use + result = connect(user = "${user1}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO '${user1}'""" + } + + // case run user(default root), and show grant again, should be same result + result = sql_return_maparray """show grants for '${user1}'""" + commonAuth result, "'${user1}'@'%'" as String, "Yes", "admin", "Admin_priv" + assertTrue((result.ComputeGroupPrivs as String).contains("${group1}: Cluster_usage_priv")) + + sql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO '${user1}'""" + result = sql_return_maparray """show grants for '${user1}'""" + commonAuth result, "'${user1}'@'%'" as String, "Yes", "admin", "Admin_priv" + assertTrue((result.ComputeGroupPrivs as String).contains("${group1}: Cluster_usage_priv")) + + connect(user = "${user1}", password = 'Cloud12345', url = context.config.jdbcUrl) { + test { + sql """use @${group1}""" + exception "Cluster ${group1} not exist" + } + result = sql_return_maparray """show grants for '${user1}'""" + commonAuth result, "'${user1}'@'%'", "Yes", "admin", "Admin_priv" + assertTrue((result.ComputeGroupPrivs as String).contains("${group1}: Cluster_usage_priv")) + } + + + sql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO '${user2}'""" + try { + result = connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO '${user1}'""" + } + } catch (Exception e) { + assertTrue(e.getMessage().contains("Access denied; you need all [Grant_priv, Cluster_usage_priv] privilege(s) for this operation"), e.getMessage()) + } + sql """REVOKE USAGE_PRIV ON COMPUTE GROUP '${group1}' FROM '${user2}'""" + + // default compute group + sql """SET PROPERTY FOR '${user1}' 'default_compute_group' = '${validCluster}'""" + sql """SET PROPERTY FOR '${user2}' 'default_compute_group' = 
'${validCluster}'""" + def show_group_1 = getCluster(validCluster) + + assertTrue(show_group_1[2].contains(user2), "Expect contain users ${user2}") + + result = getProperty("default_compute_group", "${user1}") + assertEquals(result.Value as String, "${validCluster}" as String) + + connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + result = sql """use @${validCluster}""" + assertEquals(result[0][0], 0) + result = getProperty("default_compute_group", "") + assertEquals(result.Value as String, "${validCluster}" as String) + } + // set default_compute_group to '' + sql """SET PROPERTY FOR '${user2}' 'default_compute_group' = ''""" + connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + result = getProperty("default_compute_group", "") + assertEquals(result.Value as String, "" as String) + } + + sql """SET PROPERTY FOR '${user2}' 'default_compute_group' = '${validCluster}'""" + result = sql """REVOKE USAGE_PRIV ON COMPUTE GROUP '${validCluster}' FROM '${user2}'""" + assertEquals(result[0][0], 0) + connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + test { + sql """use @${group1}""" + exception "USAGE denied to user" + } + } + + connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + test { + sql """use @${validCluster}""" + exception "USAGE denied to user" + } + } + + sql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO '${user2}'""" + sql """GRANT USAGE_PRIV ON COMPUTE GROUP '${validCluster}' TO '${user2}'""" + show_group_2 = connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + getCluster(validCluster) + } + + assertTrue(show_group_2[2].equals(user2), "Expect just only have user ${user2}") + + result = connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """USE @${validCluster}""" + } + assertEquals(result[0][0], 0) + + sql """REVOKE USAGE_PRIV ON COMPUTE GROUP 
'${validCluster}' FROM '${user2}'""" + + connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + test { + sql """use @${validCluster}""" + exception "USAGE denied to user" + } + result = sql_return_maparray """show grants for '${user2}'""" + commonAuth result, "'${user2}'@'%'" as String, "Yes", "", "Select_priv" + assertTrue((result.ComputeGroupPrivs as String).contains("${group1}: Cluster_usage_priv")) + + test { + sql """REVOKE USAGE_PRIV ON COMPUTE GROUP 'NotExistCluster' FROM '${user2}'""" + exception "Access denied; you need all" + } + } + + sql """REVOKE USAGE_PRIV ON COMPUTE GROUP '${validCluster}' FROM '${user2}'""" + result = sql_return_maparray """show grants for '${user2}'""" + commonAuth result, "'${user2}'@'%'" as String, "Yes", "", "Select_priv" + assertTrue((result.ComputeGroupPrivs as String).contains("${group1}: Cluster_usage_priv")) + + sql "sync" + // 3. revoke group + // admin role user can revoke group + result = connect(user = "${user1}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """REVOKE USAGE_PRIV ON COMPUTE GROUP '${group1}' FROM '${user1}'""" + } + + // revoke GRANT_PRIV from general user, he can not revoke group to other user. 
+ sql """revoke GRANT_PRIV on *.*.* from ${user2}""" + + sql "sync" + + // general user can't revoke group + try { + result = connect(user = "${user2}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """REVOKE USAGE_PRIV ON COMPUTE GROUP '${group1}' FROM '${user2}'""" + } + } catch (Exception e) { + assertTrue(e.getMessage().contains("Access denied; you need all"), e.getMessage()) + } + + result = sql_return_maparray """show grants for '${user1}'""" + commonAuth result, "'${user1}'@'%'" as String, "Yes", "admin", "Admin_priv" + assertNull(result.ComputeGroupPrivs[0]) + + result = sql_return_maparray """show grants for '${user2}'""" + commonAuth result, "'${user2}'@'%'" as String, "Yes", "", "Select_priv" + assertTrue((result.ComputeGroupPrivs as String).contains("${group1}: Cluster_usage_priv")) + + // revoke user1 admin role + sql """REVOKE 'admin' FROM ${user1}""" + result = sql_return_maparray """show grants for '${user1}'""" + assertEquals("'${user1}'@'%'" as String, result.UserIdentity[0] as String) + assertEquals("", result.Roles[0]) + assertNull(result.GlobalPrivs[0]) + assertNull(result.ComputeGroupPrivs[0]) + + // user1 no admin auth, so failed to set other default compute group + try { + result = connect(user = "${user1}", password = 'Cloud12345', url = context.config.jdbcUrl) { + sql """SET PROPERTY FOR '${user2}' 'default_compute_group' = '${validCluster}'""" + } + } catch (Exception e) { + assertTrue(e.getMessage().contains("Access denied for user"), e.getMessage()) + } + + sql """drop user if exists ${user1}""" + // grant not exists user + result = sql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO 'NotExitUser'""" + assertEquals(result[0][0], 0) + + // drop user and grant him the group priv + result = sql """GRANT USAGE_PRIV ON COMPUTE GROUP '${group1}' TO '${user1}'""" + assertEquals(result[0][0], 0) + result = sql """REVOKE USAGE_PRIV ON COMPUTE GROUP '${group1}' FROM '${user1}'""" + assertEquals(result[0][0], 0) + // general 
user can't grant group to use + sql """drop user if exists ${user2}""" + sql """drop user if exists ${user3}""" +} + + diff --git a/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy new file mode 100644 index 00000000000..a086731efff --- /dev/null +++ b/regression-test/suites/cloud_p0/cache/multi_cluster/warm_up/cluster/test_warm_up_compute_group.groovy @@ -0,0 +1,265 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +import org.codehaus.groovy.runtime.IOGroovyMethods + +suite("test_warm_up_compute_group") { + def ttlProperties = """ PROPERTIES("file_cache_ttl_seconds"="12000") """ + def getJobState = { jobId -> + def jobStateResult = sql """ SHOW WARM UP JOB WHERE ID = ${jobId} """ + return jobStateResult[0][2] + } + def table = "customer" + + List<String> ipList = new ArrayList<>(); + List<String> hbPortList = new ArrayList<>() + List<String> httpPortList = new ArrayList<>() + List<String> brpcPortList = new ArrayList<>() + List<String> beUniqueIdList = new ArrayList<>() + + String[] bes = context.config.multiClusterBes.split(','); + println("the value is " + context.config.multiClusterBes); + int num = 0 + for(String values : bes) { + if (num++ == 2) break; + println("the value is " + values); + String[] beInfo = values.split(':'); + ipList.add(beInfo[0]); + hbPortList.add(beInfo[1]); + httpPortList.add(beInfo[2]); + beUniqueIdList.add(beInfo[3]); + brpcPortList.add(beInfo[4]); + } + + println("the ip is " + ipList); + println("the heartbeat port is " + hbPortList); + println("the http port is " + httpPortList); + println("the be unique id is " + beUniqueIdList); + println("the brpc port is " + brpcPortList); + + sql new File("""${context.file.parent}/../ddl/${table}_delete.sql""").text + sql new File("""${context.file.parent}/../ddl/supplier_delete.sql""").text + // create table if not exists + sql (new File("""${context.file.parent}/../ddl/${table}.sql""").text + ttlProperties) + sql (new File("""${context.file.parent}/../ddl/supplier.sql""").text + ttlProperties) + + sql """ TRUNCATE TABLE __internal_schema.cloud_cache_hotspot; """ + sleep(30000) + + def s3BucketName = getS3BucketName() + def s3WithProperties = """WITH S3 ( + |"AWS_ACCESS_KEY" = "${getS3AK()}", + |"AWS_SECRET_KEY" = "${getS3SK()}", + |"AWS_ENDPOINT" = "${getS3Endpoint()}", + |"AWS_REGION" = "${getS3Region()}", + |"provider" = "${getS3Provider()}") + |PROPERTIES( + |"exec_mem_limit" = "8589934592", + 
|"load_parallelism" = "3")""".stripMargin() + + + + def clearFileCache = { ip, port -> + httpTest { + endpoint "" + uri ip + ":" + port + """/api/file_cache?op=clear&sync=true""" + op "get" + body "" + } + } + + def getMetricsMethod = { ip, port, check_func -> + httpTest { + endpoint ip + ":" + port + uri "/brpc_metrics" + op "get" + check check_func + } + } + + clearFileCache.call(ipList[0], httpPortList[0]); + clearFileCache.call(ipList[1], httpPortList[1]); + + def load_customer_once = { + def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString() + def loadLabel = table + "_" + uniqueID + // load data from cos + def loadSql = new File("""${context.file.parent}/../ddl/${table}_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName) + loadSql = loadSql.replaceAll("\\\$\\{loadLabel\\}", loadLabel) + s3WithProperties + sql loadSql + + // check load state + while (true) { + def stateResult = sql "show load where Label = '${loadLabel}'" + def loadState = stateResult[stateResult.size() - 1][2].toString() + if ("CANCELLED".equalsIgnoreCase(loadState)) { + throw new IllegalStateException("load ${loadLabel} failed.") + } else if ("FINISHED".equalsIgnoreCase(loadState)) { + break + } + sleep(5000) + } + } + + def load_supplier_once = { + def uniqueID = Math.abs(UUID.randomUUID().hashCode()).toString() + def loadLabel = "supplier_" + uniqueID + // load data from cos + def loadSql = new File("""${context.file.parent}/../ddl/supplier_load.sql""").text.replaceAll("\\\$\\{s3BucketName\\}", s3BucketName) + loadSql = loadSql.replaceAll("\\\$\\{loadLabel\\}", loadLabel) + s3WithProperties + sql loadSql + + // check load state + while (true) { + def stateResult = sql "show load where Label = '${loadLabel}'" + def loadState = stateResult[stateResult.size() - 1][2].toString() + if ("CANCELLED".equalsIgnoreCase(loadState)) { + throw new IllegalStateException("load ${loadLabel} failed.") + } else if ("FINISHED".equalsIgnoreCase(loadState)) { + break + } + 
sleep(5000) + } + } + + sql "use @regression_cluster_name0" + load_customer_once() + load_customer_once() + load_customer_once() + load_customer_once() + load_customer_once() + load_supplier_once() + load_supplier_once() + load_supplier_once() + + for (int i = 0; i < 1000; i++) { + sql "select count(*) from customer" + sql "select count(*) from supplier" + } + sleep(40000) + def jobId_ = sql "WARM UP COMPUTE GROUP regression_cluster_name1 WITH COMPUTE GROUP regression_cluster_name0" + def waitJobDone = { jobId -> + int retryTime = 120 + int i = 0 + for (; i < retryTime; i++) { + sleep(1000) + def status = getJobState(jobId[0][0]) + logger.info(status) + if (status.equals("CANCELLED")) { + assertTrue(false); + } + if (status.equals("FINISHED")) { + break; + } + } + if (i == retryTime) { + sql "cancel warm up job where id = ${jobId[0][0]}" + assertTrue(false); + } + } + waitJobDone(jobId_) + + sleep(30000) + long ttl_cache_size = 0 + getMetricsMethod.call(ipList[0], brpcPortList[0]) { + respCode, body -> + assertEquals("${respCode}".toString(), "200") + String out = "${body}".toString() + def strs = out.split('\n') + Boolean flag = false; + for (String line in strs) { + if (line.contains("ttl_cache_size")) { + if (line.startsWith("#")) { + continue + } + def i = line.indexOf(' ') + ttl_cache_size = line.substring(i).toLong() + flag = true + break + } + } + assertTrue(flag) + } + + getMetricsMethod.call(ipList[1], brpcPortList[1]) { + respCode, body -> + assertEquals("${respCode}".toString(), "200") + String out = "${body}".toString() + def strs = out.split('\n') + Boolean flag = false; + for (String line in strs) { + if (line.contains("ttl_cache_size")) { + if (line.startsWith("#")) { + continue + } + def i = line.indexOf(' ') + assertEquals(ttl_cache_size, line.substring(i).toLong()) + flag = true + break + } + } + assertTrue(flag) + } + + try { + sql "WARM UP COMPUTE GROUP regression_cluster_name1 WITH COMPUTE GROUP regression_cluster_name2" + assertTrue(false) + } 
catch (Exception e) { + assertTrue(true) + } + + try { + sql "WARM UP COMPUTE GROUP regression_cluster_name2 WITH COMPUTE GROUP regression_cluster_name0" + assertTrue(false) + } catch (Exception e) { + assertTrue(true) + } + + try { + sql "WARM UP COMPUTE GROUP regression_cluster_name0 WITH COMPUTE GROUP regression_cluster_name0" + assertTrue(false) + } catch (Exception e) { + assertTrue(true) + } + + sql new File("""${context.file.parent}/../ddl/${table}_delete.sql""").text + sql new File("""${context.file.parent}/../ddl/supplier_delete.sql""").text + + clearFileCache.call(ipList[1], httpPortList[1]); + jobId_ = sql "WARM UP COMPUTE GROUP regression_cluster_name1 WITH COMPUTE GROUP regression_cluster_name0" + waitJobDone(jobId_) + sleep(40000) + getMetricsMethod.call(ipList[1], brpcPortList[1]) { + respCode, body -> + assertEquals("${respCode}".toString(), "200") + String out = "${body}".toString() + def strs = out.split('\n') + Boolean flag = false; + for (String line in strs) { + if (line.contains("ttl_cache_size")) { + if (line.startsWith("#")) { + continue + } + def j = line.indexOf(' ') + assertEquals(0, line.substring(j).toLong()) + flag = true + break + } + } + assertTrue(flag) + } +} --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org For additional commands, e-mail: commits-h...@doris.apache.org