This is an automated email from the ASF dual-hosted git repository.

wenchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new f8c2671ada36 [SPARK-52872][SQL][TESTS] Improve test coverage for 
`HigherOrderFunctions`
f8c2671ada36 is described below

commit f8c2671ada36dcd8ba516a409ba49fd1b2bc6c8e
Author: Mihailo Timotic <mihailo.timo...@databricks.com>
AuthorDate: Mon Jul 21 18:38:03 2025 +0800

    [SPARK-52872][SQL][TESTS] Improve test coverage for `HigherOrderFunctions`
    
    ### What changes were proposed in this pull request?
    Adding test cases that call `HigherOrderFunctions` with literal (non-lambda) arguments
    
    ### Why are the changes needed?
    To improve test coverage
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    This PR adds tests.
    
    ### Was this patch authored or co-authored using generative AI tooling?
    No
    
    Closes #51564 from mihailotim-db/mihailotim-db/hof_tests.
    
    Authored-by: Mihailo Timotic <mihailo.timo...@databricks.com>
    Signed-off-by: Wenchen Fan <wenc...@databricks.com>
---
 .../higher-order-functions.sql.out                 | 738 ++++++++++++++++++
 .../nonansi/higher-order-functions.sql.out         | 738 ++++++++++++++++++
 .../sql-tests/inputs/higher-order-functions.sql    |  64 +-
 .../results/higher-order-functions.sql.out         | 834 +++++++++++++++++++++
 .../results/nonansi/higher-order-functions.sql.out | 834 +++++++++++++++++++++
 5 files changed, 3207 insertions(+), 1 deletion(-)

diff --git 
a/sql/core/src/test/resources/sql-tests/analyzer-results/higher-order-functions.sql.out
 
b/sql/core/src/test/resources/sql-tests/analyzer-results/higher-order-functions.sql.out
index c06d1e5534ae..ab09f9ee4785 100644
--- 
a/sql/core/src/test/resources/sql-tests/analyzer-results/higher-order-functions.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/analyzer-results/higher-order-functions.sql.out
@@ -370,3 +370,741 @@ select aggregate(split('abcdefgh',''), array(array('')), 
(acc, x) -> array(array
 -- !query analysis
 Project [aggregate(split(abcdefgh, , -1), array(array()), 
lambdafunction(array(array(lambda x#x)), lambda acc#x, lambda x#x, false), 
lambdafunction(lambda id#x, lambda id#x, false)) AS aggregate(split(abcdefgh, , 
-1), array(array()), lambdafunction(array(array(namedlambdavariable())), 
namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))#x]
 +- OneRowRelation
+
+
+-- !query
+select aggregate(array(1, 2, 3), 0, 100) as aggregate_int_literal
+-- !query analysis
+Project [aggregate(array(1, 2, 3), 0, lambdafunction(100, lambda col0#x, 
lambda col1#x, true), lambdafunction(lambda id#x, lambda id#x, false)) AS 
aggregate_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select aggregate(array(1, 2, 3), map(), map('result', 999)) as 
aggregate_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(result, 999), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, INT>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"MAP<VOID, VOID>\"",
+    "sqlExpr" : "\"aggregate(array(1, 2, 3), map(), lambdafunction(map(result, 
999), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 59,
+    "fragment" : "aggregate(array(1, 2, 3), map(), map('result', 999))"
+  } ]
+}
+
+
+-- !query
+select aggregate(array(1, 2, 3), struct('init', 0), struct('final', 999)) as 
aggregate_struct_literal
+-- !query analysis
+Project [aggregate(array(1, 2, 3), struct(col1, init, col2, 0), 
lambdafunction(struct(col1, final, col2, 999), lambda col0#x, lambda col1#x, 
true), lambdafunction(lambda id#x, lambda id#x, false)) AS 
aggregate_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select aggregate(array(1, 2, 3), array(), array('result')) as 
aggregate_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(result), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<STRING>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"ARRAY<VOID>\"",
+    "sqlExpr" : "\"aggregate(array(1, 2, 3), array(), 
lambdafunction(array(result), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 58,
+    "fragment" : "aggregate(array(1, 2, 3), array(), array('result'))"
+  } ]
+}
+
+
+-- !query
+select array_sort(array(3, 1, 2), 1) as array_sort_int_literal
+-- !query analysis
+Project [array_sort(array(3, 1, 2), lambdafunction(1, lambda col0#x, lambda 
col1#x, true), false) AS array_sort_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select array_sort(array(3, 1, 2), map('compare', 0)) as array_sort_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_RETURN_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "actualType" : "\"MAP<STRING, INT>\"",
+    "expectedType" : "\"INT\"",
+    "functionName" : "`lambdafunction`",
+    "sqlExpr" : "\"array_sort(array(3, 1, 2), lambdafunction(map(compare, 0), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 52,
+    "fragment" : "array_sort(array(3, 1, 2), map('compare', 0))"
+  } ]
+}
+
+
+-- !query
+select array_sort(array(3, 1, 2), struct('result', 0)) as 
array_sort_struct_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_RETURN_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "actualType" : "\"STRUCT<col1: STRING NOT NULL, col2: INT NOT NULL>\"",
+    "expectedType" : "\"INT\"",
+    "functionName" : "`lambdafunction`",
+    "sqlExpr" : "\"array_sort(array(3, 1, 2), lambdafunction(struct(result, 
0), namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 54,
+    "fragment" : "array_sort(array(3, 1, 2), struct('result', 0))"
+  } ]
+}
+
+
+-- !query
+select array_sort(array(3, 1, 2), array(0)) as array_sort_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_RETURN_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "actualType" : "\"ARRAY<INT>\"",
+    "expectedType" : "\"INT\"",
+    "functionName" : "`lambdafunction`",
+    "sqlExpr" : "\"array_sort(array(3, 1, 2), lambdafunction(array(0), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 43,
+    "fragment" : "array_sort(array(3, 1, 2), array(0))"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), 1) as exists_int_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(1, 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 32,
+    "fragment" : "exists(array(1, 2, 3), 1)"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), map('found', true)) as exists_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(found, true), namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(map(found, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 49,
+    "fragment" : "exists(array(1, 2, 3), map('found', true))"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), struct('exists', true)) as exists_struct_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(exists, true), 
namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(struct(exists, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 53,
+    "fragment" : "exists(array(1, 2, 3), struct('exists', true))"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), array(true)) as exists_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true), namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(array(true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 42,
+    "fragment" : "exists(array(1, 2, 3), array(true))"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), 1) as filter_int_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(1, 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 32,
+    "fragment" : "filter(array(1, 2, 3), 1)"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), map('key', 'value')) as filter_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(key, value), namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, STRING>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(map(key, value), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 50,
+    "fragment" : "filter(array(1, 2, 3), map('key', 'value'))"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), struct('valid', true)) as filter_struct_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(valid, true), 
namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(struct(valid, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 52,
+    "fragment" : "filter(array(1, 2, 3), struct('valid', true))"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), array(true, false)) as filter_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true, false), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(array(true, false), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 49,
+    "fragment" : "filter(array(1, 2, 3), array(true, false))"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), 1) as forall_int_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(1, 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 32,
+    "fragment" : "forall(array(1, 2, 3), 1)"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), map('all', true)) as forall_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(all, true), namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(map(all, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 47,
+    "fragment" : "forall(array(1, 2, 3), map('all', true))"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), struct('all', true)) as forall_struct_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(all, true), 
namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(struct(all, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 50,
+    "fragment" : "forall(array(1, 2, 3), struct('all', true))"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), array(true, true)) as forall_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true, true), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(array(true, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 48,
+    "fragment" : "forall(array(1, 2, 3), array(true, true))"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), 1) as map_filter_int_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(1, 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 41,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), 1)"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), map('keep', true)) as 
map_filter_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(keep, true), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(map(keep, true), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 57,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), map('keep', true))"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), struct('filter', true)) as 
map_filter_struct_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(filter, true), 
namedlambdavariable(), namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(struct(filter, 
true), namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 62,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), struct('filter', true))"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), array(true)) as map_filter_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(array(true), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 51,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), array(true))"
+  } ]
+}
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), 100) as map_zipwith_int_literal
+-- !query analysis
+Project [map_zip_with(map(a, 1), map(a, 10), lambdafunction(100, lambda 
col0#x, lambda col1#x, lambda col2#x, true)) AS map_zipwith_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), map('merged', true)) as 
map_zipwith_map_literal
+-- !query analysis
+Project [map_zip_with(map(a, 1), map(a, 10), lambdafunction(map(merged, true), 
lambda col0#x, lambda col1#x, lambda col2#x, true)) AS 
map_zipwith_map_literal#x]
++- OneRowRelation
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), struct('left', 1, 'right', 10)) 
as map_zipwith_struct_literal
+-- !query analysis
+Project [map_zip_with(map(a, 1), map(a, 10), lambdafunction(struct(col1, left, 
col2, 1, col3, right, col4, 10), lambda col0#x, lambda col1#x, lambda col2#x, 
true)) AS map_zipwith_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), array('combined')) as 
map_zipwith_array_literal
+-- !query analysis
+Project [map_zip_with(map(a, 1), map(a, 10), lambdafunction(array(combined), 
lambda col0#x, lambda col1#x, lambda col2#x, true)) AS 
map_zipwith_array_literal#x]
++- OneRowRelation
+
+
+-- !query
+select reduce(array(1, 2, 3), 0, 100) as reduce_int_literal
+-- !query analysis
+Project [reduce(array(1, 2, 3), 0, lambdafunction(100, lambda col0#x, lambda 
col1#x, true), lambdafunction(lambda id#x, lambda id#x, false)) AS 
reduce_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select reduce(array(1, 2, 3), map(), map('result', 999)) as reduce_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(result, 999), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, INT>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"MAP<VOID, VOID>\"",
+    "sqlExpr" : "\"reduce(array(1, 2, 3), map(), lambdafunction(map(result, 
999), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 56,
+    "fragment" : "reduce(array(1, 2, 3), map(), map('result', 999))"
+  } ]
+}
+
+
+-- !query
+select reduce(array(1, 2, 3), struct('init', 0), struct('final', 999)) as 
reduce_struct_literal
+-- !query analysis
+Project [reduce(array(1, 2, 3), struct(col1, init, col2, 0), 
lambdafunction(struct(col1, final, col2, 999), lambda col0#x, lambda col1#x, 
true), lambdafunction(lambda id#x, lambda id#x, false)) AS 
reduce_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select reduce(array(1, 2, 3), array(), array('result')) as reduce_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(result), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<STRING>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"ARRAY<VOID>\"",
+    "sqlExpr" : "\"reduce(array(1, 2, 3), array(), 
lambdafunction(array(result), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 55,
+    "fragment" : "reduce(array(1, 2, 3), array(), array('result'))"
+  } ]
+}
+
+
+-- !query
+select transform(array(1, 2, 3), 42) as transform_int_literal
+-- !query analysis
+Project [transform(array(1, 2, 3), lambdafunction(42, lambda col0#x, true)) AS 
transform_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform(array(1, 2, 3), map('key', 'value')) as transform_map_literal
+-- !query analysis
+Project [transform(array(1, 2, 3), lambdafunction(map(key, value), lambda 
col0#x, true)) AS transform_map_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform(array(1, 2, 3), struct('id', 99, 'name', 'test')) as 
transform_struct_literal
+-- !query analysis
+Project [transform(array(1, 2, 3), lambdafunction(struct(col1, id, col2, 99, 
col3, name, col4, test), lambda col0#x, true)) AS transform_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform(array(1, 2, 3), array('a', 'b')) as transform_array_literal
+-- !query analysis
+Project [transform(array(1, 2, 3), lambdafunction(array(a, b), lambda col0#x, 
true)) AS transform_array_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), 42) as transform_keys_int_literal
+-- !query analysis
+Project [transform_keys(map(a, 1, b, 2), lambdafunction(42, lambda col0#x, 
lambda col1#x, true)) AS transform_keys_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), map('new', 'key')) as 
transform_keys_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.INVALID_MAP_KEY_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "keyType" : "\"MAP<STRING, STRING>\"",
+    "sqlExpr" : "\"transform_keys(map(a, 1, b, 2), lambdafunction(map(new, 
key), namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 61,
+    "fragment" : "transform_keys(map('a', 1, 'b', 2), map('new', 'key'))"
+  } ]
+}
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), struct('key', 'value')) as 
transform_keys_struct_literal
+-- !query analysis
+Project [transform_keys(map(a, 1, b, 2), lambdafunction(struct(col1, key, 
col2, value), lambda col0#x, lambda col1#x, true)) AS 
transform_keys_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), array('new_key')) as 
transform_keys_array_literal
+-- !query analysis
+Project [transform_keys(map(a, 1, b, 2), lambdafunction(array(new_key), lambda 
col0#x, lambda col1#x, true)) AS transform_keys_array_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), 999) as 
transform_values_int_literal
+-- !query analysis
+Project [transform_values(map(a, 1, b, 2), lambdafunction(999, lambda col0#x, 
lambda col1#x, true)) AS transform_values_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), map('new', 'value')) as 
transform_values_map_literal
+-- !query analysis
+Project [transform_values(map(a, 1, b, 2), lambdafunction(map(new, value), 
lambda col0#x, lambda col1#x, true)) AS transform_values_map_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), struct('val', 999)) as 
transform_values_struct_literal
+-- !query analysis
+Project [transform_values(map(a, 1, b, 2), lambdafunction(struct(col1, val, 
col2, 999), lambda col0#x, lambda col1#x, true)) AS 
transform_values_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), array('new_value')) as 
transform_values_array_literal
+-- !query analysis
+Project [transform_values(map(a, 1, b, 2), lambdafunction(array(new_value), 
lambda col0#x, lambda col1#x, true)) AS transform_values_array_literal#x]
++- OneRowRelation
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), 100) as zipwith_int_literal
+-- !query analysis
+Project [zip_with(array(1, 2, 3), array(4, 5, 6), lambdafunction(100, lambda 
col0#x, lambda col1#x, true)) AS zipwith_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), map('merged', true)) as 
zipwith_map_literal
+-- !query analysis
+Project [zip_with(array(1, 2, 3), array(4, 5, 6), lambdafunction(map(merged, 
true), lambda col0#x, lambda col1#x, true)) AS zipwith_map_literal#x]
++- OneRowRelation
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), struct('left', 1, 'right', 2)) 
as zipwith_struct_literal
+-- !query analysis
+Project [zip_with(array(1, 2, 3), array(4, 5, 6), lambdafunction(struct(col1, 
left, col2, 1, col3, right, col4, 2), lambda col0#x, lambda col1#x, true)) AS 
zipwith_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), array('combined')) as 
zipwith_array_literal
+-- !query analysis
+Project [zip_with(array(1, 2, 3), array(4, 5, 6), 
lambdafunction(array(combined), lambda col0#x, lambda col1#x, true)) AS 
zipwith_array_literal#x]
++- OneRowRelation
diff --git 
a/sql/core/src/test/resources/sql-tests/analyzer-results/nonansi/higher-order-functions.sql.out
 
b/sql/core/src/test/resources/sql-tests/analyzer-results/nonansi/higher-order-functions.sql.out
index 1281b19eb2f8..d4717b18bac5 100644
--- 
a/sql/core/src/test/resources/sql-tests/analyzer-results/nonansi/higher-order-functions.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/analyzer-results/nonansi/higher-order-functions.sql.out
@@ -370,3 +370,741 @@ select aggregate(split('abcdefgh',''), array(array('')), 
(acc, x) -> array(array
 -- !query analysis
 Project [aggregate(split(abcdefgh, , -1), array(array()), 
lambdafunction(array(array(lambda x#x)), lambda acc#x, lambda x#x, false), 
lambdafunction(lambda id#x, lambda id#x, false)) AS aggregate(split(abcdefgh, , 
-1), array(array()), lambdafunction(array(array(namedlambdavariable())), 
namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))#x]
 +- OneRowRelation
+
+
+-- !query
+select aggregate(array(1, 2, 3), 0, 100) as aggregate_int_literal
+-- !query analysis
+Project [aggregate(array(1, 2, 3), 0, lambdafunction(100, lambda col0#x, 
lambda col1#x, true), lambdafunction(lambda id#x, lambda id#x, false)) AS 
aggregate_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select aggregate(array(1, 2, 3), map(), map('result', 999)) as 
aggregate_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(result, 999), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, INT>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"MAP<VOID, VOID>\"",
+    "sqlExpr" : "\"aggregate(array(1, 2, 3), map(), lambdafunction(map(result, 
999), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 59,
+    "fragment" : "aggregate(array(1, 2, 3), map(), map('result', 999))"
+  } ]
+}
+
+
+-- !query
+select aggregate(array(1, 2, 3), struct('init', 0), struct('final', 999)) as 
aggregate_struct_literal
+-- !query analysis
+Project [aggregate(array(1, 2, 3), struct(col1, init, col2, 0), 
lambdafunction(struct(col1, final, col2, 999), lambda col0#x, lambda col1#x, 
true), lambdafunction(lambda id#x, lambda id#x, false)) AS 
aggregate_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select aggregate(array(1, 2, 3), array(), array('result')) as 
aggregate_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(result), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<STRING>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"ARRAY<VOID>\"",
+    "sqlExpr" : "\"aggregate(array(1, 2, 3), array(), 
lambdafunction(array(result), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 58,
+    "fragment" : "aggregate(array(1, 2, 3), array(), array('result'))"
+  } ]
+}
+
+
+-- !query
+select array_sort(array(3, 1, 2), 1) as array_sort_int_literal
+-- !query analysis
+Project [array_sort(array(3, 1, 2), lambdafunction(1, lambda col0#x, lambda 
col1#x, true), false) AS array_sort_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select array_sort(array(3, 1, 2), map('compare', 0)) as array_sort_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_RETURN_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "actualType" : "\"MAP<STRING, INT>\"",
+    "expectedType" : "\"INT\"",
+    "functionName" : "`lambdafunction`",
+    "sqlExpr" : "\"array_sort(array(3, 1, 2), lambdafunction(map(compare, 0), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 52,
+    "fragment" : "array_sort(array(3, 1, 2), map('compare', 0))"
+  } ]
+}
+
+
+-- !query
+select array_sort(array(3, 1, 2), struct('result', 0)) as 
array_sort_struct_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_RETURN_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "actualType" : "\"STRUCT<col1: STRING NOT NULL, col2: INT NOT NULL>\"",
+    "expectedType" : "\"INT\"",
+    "functionName" : "`lambdafunction`",
+    "sqlExpr" : "\"array_sort(array(3, 1, 2), lambdafunction(struct(result, 
0), namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 54,
+    "fragment" : "array_sort(array(3, 1, 2), struct('result', 0))"
+  } ]
+}
+
+
+-- !query
+select array_sort(array(3, 1, 2), array(0)) as array_sort_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_RETURN_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "actualType" : "\"ARRAY<INT>\"",
+    "expectedType" : "\"INT\"",
+    "functionName" : "`lambdafunction`",
+    "sqlExpr" : "\"array_sort(array(3, 1, 2), lambdafunction(array(0), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 43,
+    "fragment" : "array_sort(array(3, 1, 2), array(0))"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), 1) as exists_int_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(1, 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 32,
+    "fragment" : "exists(array(1, 2, 3), 1)"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), map('found', true)) as exists_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(found, true), namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(map(found, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 49,
+    "fragment" : "exists(array(1, 2, 3), map('found', true))"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), struct('exists', true)) as exists_struct_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(exists, true), 
namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(struct(exists, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 53,
+    "fragment" : "exists(array(1, 2, 3), struct('exists', true))"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), array(true)) as exists_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true), namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(array(true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 42,
+    "fragment" : "exists(array(1, 2, 3), array(true))"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), 1) as filter_int_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(1, 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 32,
+    "fragment" : "filter(array(1, 2, 3), 1)"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), map('key', 'value')) as filter_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(key, value), namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, STRING>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(map(key, value), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 50,
+    "fragment" : "filter(array(1, 2, 3), map('key', 'value'))"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), struct('valid', true)) as filter_struct_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(valid, true), 
namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(struct(valid, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 52,
+    "fragment" : "filter(array(1, 2, 3), struct('valid', true))"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), array(true, false)) as filter_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true, false), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(array(true, false), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 49,
+    "fragment" : "filter(array(1, 2, 3), array(true, false))"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), 1) as forall_int_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(1, 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 32,
+    "fragment" : "forall(array(1, 2, 3), 1)"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), map('all', true)) as forall_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(all, true), namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(map(all, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 47,
+    "fragment" : "forall(array(1, 2, 3), map('all', true))"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), struct('all', true)) as forall_struct_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(all, true), 
namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(struct(all, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 50,
+    "fragment" : "forall(array(1, 2, 3), struct('all', true))"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), array(true, true)) as forall_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true, true), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(array(true, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 48,
+    "fragment" : "forall(array(1, 2, 3), array(true, true))"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), 1) as map_filter_int_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(1, 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 41,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), 1)"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), map('keep', true)) as 
map_filter_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(keep, true), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(map(keep, true), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 57,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), map('keep', true))"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), struct('filter', true)) as 
map_filter_struct_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(filter, true), 
namedlambdavariable(), namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(struct(filter, 
true), namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 62,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), struct('filter', true))"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), array(true)) as map_filter_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(array(true), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 51,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), array(true))"
+  } ]
+}
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), 100) as map_zipwith_int_literal
+-- !query analysis
+Project [map_zip_with(map(a, 1), map(a, 10), lambdafunction(100, lambda 
col0#x, lambda col1#x, lambda col2#x, true)) AS map_zipwith_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), map('merged', true)) as 
map_zipwith_map_literal
+-- !query analysis
+Project [map_zip_with(map(a, 1), map(a, 10), lambdafunction(map(merged, true), 
lambda col0#x, lambda col1#x, lambda col2#x, true)) AS 
map_zipwith_map_literal#x]
++- OneRowRelation
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), struct('left', 1, 'right', 10)) 
as map_zipwith_struct_literal
+-- !query analysis
+Project [map_zip_with(map(a, 1), map(a, 10), lambdafunction(struct(col1, left, 
col2, 1, col3, right, col4, 10), lambda col0#x, lambda col1#x, lambda col2#x, 
true)) AS map_zipwith_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), array('combined')) as 
map_zipwith_array_literal
+-- !query analysis
+Project [map_zip_with(map(a, 1), map(a, 10), lambdafunction(array(combined), 
lambda col0#x, lambda col1#x, lambda col2#x, true)) AS 
map_zipwith_array_literal#x]
++- OneRowRelation
+
+
+-- !query
+select reduce(array(1, 2, 3), 0, 100) as reduce_int_literal
+-- !query analysis
+Project [reduce(array(1, 2, 3), 0, lambdafunction(100, lambda col0#x, lambda 
col1#x, true), lambdafunction(lambda id#x, lambda id#x, false)) AS 
reduce_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select reduce(array(1, 2, 3), map(), map('result', 999)) as reduce_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(result, 999), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, INT>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"MAP<VOID, VOID>\"",
+    "sqlExpr" : "\"reduce(array(1, 2, 3), map(), lambdafunction(map(result, 
999), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 56,
+    "fragment" : "reduce(array(1, 2, 3), map(), map('result', 999))"
+  } ]
+}
+
+
+-- !query
+select reduce(array(1, 2, 3), struct('init', 0), struct('final', 999)) as 
reduce_struct_literal
+-- !query analysis
+Project [reduce(array(1, 2, 3), struct(col1, init, col2, 0), 
lambdafunction(struct(col1, final, col2, 999), lambda col0#x, lambda col1#x, 
true), lambdafunction(lambda id#x, lambda id#x, false)) AS 
reduce_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select reduce(array(1, 2, 3), array(), array('result')) as reduce_array_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(result), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<STRING>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"ARRAY<VOID>\"",
+    "sqlExpr" : "\"reduce(array(1, 2, 3), array(), 
lambdafunction(array(result), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 55,
+    "fragment" : "reduce(array(1, 2, 3), array(), array('result'))"
+  } ]
+}
+
+
+-- !query
+select transform(array(1, 2, 3), 42) as transform_int_literal
+-- !query analysis
+Project [transform(array(1, 2, 3), lambdafunction(42, lambda col0#x, true)) AS 
transform_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform(array(1, 2, 3), map('key', 'value')) as transform_map_literal
+-- !query analysis
+Project [transform(array(1, 2, 3), lambdafunction(map(key, value), lambda 
col0#x, true)) AS transform_map_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform(array(1, 2, 3), struct('id', 99, 'name', 'test')) as 
transform_struct_literal
+-- !query analysis
+Project [transform(array(1, 2, 3), lambdafunction(struct(col1, id, col2, 99, 
col3, name, col4, test), lambda col0#x, true)) AS transform_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform(array(1, 2, 3), array('a', 'b')) as transform_array_literal
+-- !query analysis
+Project [transform(array(1, 2, 3), lambdafunction(array(a, b), lambda col0#x, 
true)) AS transform_array_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), 42) as transform_keys_int_literal
+-- !query analysis
+Project [transform_keys(map(a, 1, b, 2), lambdafunction(42, lambda col0#x, 
lambda col1#x, true)) AS transform_keys_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), map('new', 'key')) as 
transform_keys_map_literal
+-- !query analysis
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.INVALID_MAP_KEY_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "keyType" : "\"MAP<STRING, STRING>\"",
+    "sqlExpr" : "\"transform_keys(map(a, 1, b, 2), lambdafunction(map(new, 
key), namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 61,
+    "fragment" : "transform_keys(map('a', 1, 'b', 2), map('new', 'key'))"
+  } ]
+}
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), struct('key', 'value')) as 
transform_keys_struct_literal
+-- !query analysis
+Project [transform_keys(map(a, 1, b, 2), lambdafunction(struct(col1, key, 
col2, value), lambda col0#x, lambda col1#x, true)) AS 
transform_keys_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), array('new_key')) as 
transform_keys_array_literal
+-- !query analysis
+Project [transform_keys(map(a, 1, b, 2), lambdafunction(array(new_key), lambda 
col0#x, lambda col1#x, true)) AS transform_keys_array_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), 999) as 
transform_values_int_literal
+-- !query analysis
+Project [transform_values(map(a, 1, b, 2), lambdafunction(999, lambda col0#x, 
lambda col1#x, true)) AS transform_values_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), map('new', 'value')) as 
transform_values_map_literal
+-- !query analysis
+Project [transform_values(map(a, 1, b, 2), lambdafunction(map(new, value), 
lambda col0#x, lambda col1#x, true)) AS transform_values_map_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), struct('val', 999)) as 
transform_values_struct_literal
+-- !query analysis
+Project [transform_values(map(a, 1, b, 2), lambdafunction(struct(col1, val, 
col2, 999), lambda col0#x, lambda col1#x, true)) AS 
transform_values_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), array('new_value')) as 
transform_values_array_literal
+-- !query analysis
+Project [transform_values(map(a, 1, b, 2), lambdafunction(array(new_value), 
lambda col0#x, lambda col1#x, true)) AS transform_values_array_literal#x]
++- OneRowRelation
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), 100) as zipwith_int_literal
+-- !query analysis
+Project [zip_with(array(1, 2, 3), array(4, 5, 6), lambdafunction(100, lambda 
col0#x, lambda col1#x, true)) AS zipwith_int_literal#x]
++- OneRowRelation
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), map('merged', true)) as 
zipwith_map_literal
+-- !query analysis
+Project [zip_with(array(1, 2, 3), array(4, 5, 6), lambdafunction(map(merged, 
true), lambda col0#x, lambda col1#x, true)) AS zipwith_map_literal#x]
++- OneRowRelation
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), struct('left', 1, 'right', 2)) 
as zipwith_struct_literal
+-- !query analysis
+Project [zip_with(array(1, 2, 3), array(4, 5, 6), lambdafunction(struct(col1, 
left, col2, 1, col3, right, col4, 2), lambda col0#x, lambda col1#x, true)) AS 
zipwith_struct_literal#x]
++- OneRowRelation
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), array('combined')) as 
zipwith_array_literal
+-- !query analysis
+Project [zip_with(array(1, 2, 3), array(4, 5, 6), 
lambdafunction(array(combined), lambda col0#x, lambda col1#x, true)) AS 
zipwith_array_literal#x]
++- OneRowRelation
diff --git 
a/sql/core/src/test/resources/sql-tests/inputs/higher-order-functions.sql 
b/sql/core/src/test/resources/sql-tests/inputs/higher-order-functions.sql
index 37081de012e9..f1c92327bde3 100644
--- a/sql/core/src/test/resources/sql-tests/inputs/higher-order-functions.sql
+++ b/sql/core/src/test/resources/sql-tests/inputs/higher-order-functions.sql
@@ -102,4 +102,66 @@ select transform(ys, all -> all * all) as v from values 
(array(32, 97)) as t(ys)
 select transform(ys, (all, i) -> all + i) as v from values (array(32, 97)) as 
t(ys);
 
 -- SPARK-32819: Aggregate on nested string arrays
-select aggregate(split('abcdefgh',''), array(array('')), (acc, x) -> 
array(array(x)));
\ No newline at end of file
+select aggregate(split('abcdefgh',''), array(array('')), (acc, x) -> 
array(array(x)));
+
+-- HigherOrderFunctions without lambda variables
+
+select aggregate(array(1, 2, 3), 0, 100) as aggregate_int_literal;
+select aggregate(array(1, 2, 3), map(), map('result', 999)) as 
aggregate_map_literal;
+select aggregate(array(1, 2, 3), struct('init', 0), struct('final', 999)) as 
aggregate_struct_literal;
+select aggregate(array(1, 2, 3), array(), array('result')) as 
aggregate_array_literal;
+
+select array_sort(array(3, 1, 2), 1) as array_sort_int_literal;
+select array_sort(array(3, 1, 2), map('compare', 0)) as array_sort_map_literal;
+select array_sort(array(3, 1, 2), struct('result', 0)) as 
array_sort_struct_literal;
+select array_sort(array(3, 1, 2), array(0)) as array_sort_array_literal;
+
+select exists(array(1, 2, 3), 1) as exists_int_literal;
+select exists(array(1, 2, 3), map('found', true)) as exists_map_literal;
+select exists(array(1, 2, 3), struct('exists', true)) as exists_struct_literal;
+select exists(array(1, 2, 3), array(true)) as exists_array_literal;
+
+select filter(array(1, 2, 3), 1) as filter_int_literal;
+select filter(array(1, 2, 3), map('key', 'value')) as filter_map_literal;
+select filter(array(1, 2, 3), struct('valid', true)) as filter_struct_literal;
+select filter(array(1, 2, 3), array(true, false)) as filter_array_literal;
+
+select forall(array(1, 2, 3), 1) as forall_int_literal;
+select forall(array(1, 2, 3), map('all', true)) as forall_map_literal;
+select forall(array(1, 2, 3), struct('all', true)) as forall_struct_literal;
+select forall(array(1, 2, 3), array(true, true)) as forall_array_literal;
+
+select map_filter(map('a', 1, 'b', 2), 1) as map_filter_int_literal;
+select map_filter(map('a', 1, 'b', 2), map('keep', true)) as 
map_filter_map_literal;
+select map_filter(map('a', 1, 'b', 2), struct('filter', true)) as 
map_filter_struct_literal;
+select map_filter(map('a', 1, 'b', 2), array(true)) as 
map_filter_array_literal;
+
+select map_zip_with(map('a', 1), map('a', 10), 100) as map_zipwith_int_literal;
+select map_zip_with(map('a', 1), map('a', 10), map('merged', true)) as 
map_zipwith_map_literal;
+select map_zip_with(map('a', 1), map('a', 10), struct('left', 1, 'right', 10)) 
as map_zipwith_struct_literal;
+select map_zip_with(map('a', 1), map('a', 10), array('combined')) as 
map_zipwith_array_literal;
+
+select reduce(array(1, 2, 3), 0, 100) as reduce_int_literal;
+select reduce(array(1, 2, 3), map(), map('result', 999)) as reduce_map_literal;
+select reduce(array(1, 2, 3), struct('init', 0), struct('final', 999)) as 
reduce_struct_literal;
+select reduce(array(1, 2, 3), array(), array('result')) as 
reduce_array_literal;
+
+select transform(array(1, 2, 3), 42) as transform_int_literal;
+select transform(array(1, 2, 3), map('key', 'value')) as transform_map_literal;
+select transform(array(1, 2, 3), struct('id', 99, 'name', 'test')) as 
transform_struct_literal;
+select transform(array(1, 2, 3), array('a', 'b')) as transform_array_literal;
+
+select transform_keys(map('a', 1, 'b', 2), 42) as transform_keys_int_literal;
+select transform_keys(map('a', 1, 'b', 2), map('new', 'key')) as 
transform_keys_map_literal;
+select transform_keys(map('a', 1, 'b', 2), struct('key', 'value')) as 
transform_keys_struct_literal;
+select transform_keys(map('a', 1, 'b', 2), array('new_key')) as 
transform_keys_array_literal;
+
+select transform_values(map('a', 1, 'b', 2), 999) as 
transform_values_int_literal;
+select transform_values(map('a', 1, 'b', 2), map('new', 'value')) as 
transform_values_map_literal;
+select transform_values(map('a', 1, 'b', 2), struct('val', 999)) as 
transform_values_struct_literal;
+select transform_values(map('a', 1, 'b', 2), array('new_value')) as 
transform_values_array_literal;
+
+select zip_with(array(1, 2, 3), array(4, 5, 6), 100) as zipwith_int_literal;
+select zip_with(array(1, 2, 3), array(4, 5, 6), map('merged', true)) as 
zipwith_map_literal;
+select zip_with(array(1, 2, 3), array(4, 5, 6), struct('left', 1, 'right', 2)) 
as zipwith_struct_literal;
+select zip_with(array(1, 2, 3), array(4, 5, 6), array('combined')) as 
zipwith_array_literal;
diff --git 
a/sql/core/src/test/resources/sql-tests/results/higher-order-functions.sql.out 
b/sql/core/src/test/resources/sql-tests/results/higher-order-functions.sql.out
index 7bfc35a61e09..b16bbcda2eb5 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/higher-order-functions.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/higher-order-functions.sql.out
@@ -348,3 +348,837 @@ select aggregate(split('abcdefgh',''), array(array('')), 
(acc, x) -> array(array
 struct<aggregate(split(abcdefgh, , -1), array(array()), 
lambdafunction(array(array(namedlambdavariable())), namedlambdavariable(), 
namedlambdavariable()), lambdafunction(namedlambdavariable(), 
namedlambdavariable())):array<array<string>>>
 -- !query output
 [["h"]]
+
+
+-- !query
+select aggregate(array(1, 2, 3), 0, 100) as aggregate_int_literal
+-- !query schema
+struct<aggregate_int_literal:int>
+-- !query output
+100
+
+
+-- !query
+select aggregate(array(1, 2, 3), map(), map('result', 999)) as 
aggregate_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(result, 999), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, INT>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"MAP<VOID, VOID>\"",
+    "sqlExpr" : "\"aggregate(array(1, 2, 3), map(), lambdafunction(map(result, 
999), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 59,
+    "fragment" : "aggregate(array(1, 2, 3), map(), map('result', 999))"
+  } ]
+}
+
+
+-- !query
+select aggregate(array(1, 2, 3), struct('init', 0), struct('final', 999)) as 
aggregate_struct_literal
+-- !query schema
+struct<aggregate_struct_literal:struct<col1:string,col2:int>>
+-- !query output
+{"col1":"final","col2":999}
+
+
+-- !query
+select aggregate(array(1, 2, 3), array(), array('result')) as 
aggregate_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(result), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<STRING>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"ARRAY<VOID>\"",
+    "sqlExpr" : "\"aggregate(array(1, 2, 3), array(), 
lambdafunction(array(result), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 58,
+    "fragment" : "aggregate(array(1, 2, 3), array(), array('result'))"
+  } ]
+}
+
+
+-- !query
+select array_sort(array(3, 1, 2), 1) as array_sort_int_literal
+-- !query schema
+struct<array_sort_int_literal:array<int>>
+-- !query output
+[3,1,2]
+
+
+-- !query
+select array_sort(array(3, 1, 2), map('compare', 0)) as array_sort_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_RETURN_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "actualType" : "\"MAP<STRING, INT>\"",
+    "expectedType" : "\"INT\"",
+    "functionName" : "`lambdafunction`",
+    "sqlExpr" : "\"array_sort(array(3, 1, 2), lambdafunction(map(compare, 0), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 52,
+    "fragment" : "array_sort(array(3, 1, 2), map('compare', 0))"
+  } ]
+}
+
+
+-- !query
+select array_sort(array(3, 1, 2), struct('result', 0)) as 
array_sort_struct_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_RETURN_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "actualType" : "\"STRUCT<col1: STRING NOT NULL, col2: INT NOT NULL>\"",
+    "expectedType" : "\"INT\"",
+    "functionName" : "`lambdafunction`",
+    "sqlExpr" : "\"array_sort(array(3, 1, 2), lambdafunction(struct(result, 
0), namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 54,
+    "fragment" : "array_sort(array(3, 1, 2), struct('result', 0))"
+  } ]
+}
+
+
+-- !query
+select array_sort(array(3, 1, 2), array(0)) as array_sort_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_RETURN_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "actualType" : "\"ARRAY<INT>\"",
+    "expectedType" : "\"INT\"",
+    "functionName" : "`lambdafunction`",
+    "sqlExpr" : "\"array_sort(array(3, 1, 2), lambdafunction(array(0), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 43,
+    "fragment" : "array_sort(array(3, 1, 2), array(0))"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), 1) as exists_int_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(1, 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 32,
+    "fragment" : "exists(array(1, 2, 3), 1)"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), map('found', true)) as exists_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(found, true), namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(map(found, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 49,
+    "fragment" : "exists(array(1, 2, 3), map('found', true))"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), struct('exists', true)) as exists_struct_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(exists, true), 
namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(struct(exists, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 53,
+    "fragment" : "exists(array(1, 2, 3), struct('exists', true))"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), array(true)) as exists_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true), namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(array(true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 42,
+    "fragment" : "exists(array(1, 2, 3), array(true))"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), 1) as filter_int_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(1, 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 32,
+    "fragment" : "filter(array(1, 2, 3), 1)"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), map('key', 'value')) as filter_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(key, value), namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, STRING>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(map(key, value), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 50,
+    "fragment" : "filter(array(1, 2, 3), map('key', 'value'))"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), struct('valid', true)) as filter_struct_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(valid, true), 
namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(struct(valid, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 52,
+    "fragment" : "filter(array(1, 2, 3), struct('valid', true))"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), array(true, false)) as filter_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true, false), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(array(true, false), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 49,
+    "fragment" : "filter(array(1, 2, 3), array(true, false))"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), 1) as forall_int_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(1, 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 32,
+    "fragment" : "forall(array(1, 2, 3), 1)"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), map('all', true)) as forall_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(all, true), namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(map(all, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 47,
+    "fragment" : "forall(array(1, 2, 3), map('all', true))"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), struct('all', true)) as forall_struct_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(all, true), 
namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(struct(all, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 50,
+    "fragment" : "forall(array(1, 2, 3), struct('all', true))"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), array(true, true)) as forall_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true, true), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(array(true, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 48,
+    "fragment" : "forall(array(1, 2, 3), array(true, true))"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), 1) as map_filter_int_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(1, 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 41,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), 1)"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), map('keep', true)) as 
map_filter_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(keep, true), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(map(keep, true), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 57,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), map('keep', true))"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), struct('filter', true)) as 
map_filter_struct_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(filter, true), 
namedlambdavariable(), namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(struct(filter, 
true), namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 62,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), struct('filter', true))"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), array(true)) as map_filter_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(array(true), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 51,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), array(true))"
+  } ]
+}
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), 100) as map_zipwith_int_literal
+-- !query schema
+struct<map_zipwith_int_literal:map<string,int>>
+-- !query output
+{"a":100}
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), map('merged', true)) as 
map_zipwith_map_literal
+-- !query schema
+struct<map_zipwith_map_literal:map<string,map<string,boolean>>>
+-- !query output
+{"a":{"merged":true}}
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), struct('left', 1, 'right', 10)) 
as map_zipwith_struct_literal
+-- !query schema
+struct<map_zipwith_struct_literal:map<string,struct<col1:string,col2:int,col3:string,col4:int>>>
+-- !query output
+{"a":{"col1":"left","col2":1,"col3":"right","col4":10}}
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), array('combined')) as 
map_zipwith_array_literal
+-- !query schema
+struct<map_zipwith_array_literal:map<string,array<string>>>
+-- !query output
+{"a":["combined"]}
+
+
+-- !query
+select reduce(array(1, 2, 3), 0, 100) as reduce_int_literal
+-- !query schema
+struct<reduce_int_literal:int>
+-- !query output
+100
+
+
+-- !query
+select reduce(array(1, 2, 3), map(), map('result', 999)) as reduce_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(result, 999), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, INT>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"MAP<VOID, VOID>\"",
+    "sqlExpr" : "\"reduce(array(1, 2, 3), map(), lambdafunction(map(result, 
999), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 56,
+    "fragment" : "reduce(array(1, 2, 3), map(), map('result', 999))"
+  } ]
+}
+
+
+-- !query
+select reduce(array(1, 2, 3), struct('init', 0), struct('final', 999)) as 
reduce_struct_literal
+-- !query schema
+struct<reduce_struct_literal:struct<col1:string,col2:int>>
+-- !query output
+{"col1":"final","col2":999}
+
+
+-- !query
+select reduce(array(1, 2, 3), array(), array('result')) as reduce_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(result), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<STRING>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"ARRAY<VOID>\"",
+    "sqlExpr" : "\"reduce(array(1, 2, 3), array(), 
lambdafunction(array(result), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 55,
+    "fragment" : "reduce(array(1, 2, 3), array(), array('result'))"
+  } ]
+}
+
+
+-- !query
+select transform(array(1, 2, 3), 42) as transform_int_literal
+-- !query schema
+struct<transform_int_literal:array<int>>
+-- !query output
+[42,42,42]
+
+
+-- !query
+select transform(array(1, 2, 3), map('key', 'value')) as transform_map_literal
+-- !query schema
+struct<transform_map_literal:array<map<string,string>>>
+-- !query output
+[{"key":"value"},{"key":"value"},{"key":"value"}]
+
+
+-- !query
+select transform(array(1, 2, 3), struct('id', 99, 'name', 'test')) as 
transform_struct_literal
+-- !query schema
+struct<transform_struct_literal:array<struct<col1:string,col2:int,col3:string,col4:string>>>
+-- !query output
+[{"col1":"id","col2":99,"col3":"name","col4":"test"},{"col1":"id","col2":99,"col3":"name","col4":"test"},{"col1":"id","col2":99,"col3":"name","col4":"test"}]
+
+
+-- !query
+select transform(array(1, 2, 3), array('a', 'b')) as transform_array_literal
+-- !query schema
+struct<transform_array_literal:array<array<string>>>
+-- !query output
+[["a","b"],["a","b"],["a","b"]]
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), 42) as transform_keys_int_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.SparkRuntimeException
+{
+  "errorClass" : "DUPLICATED_MAP_KEY",
+  "sqlState" : "23505",
+  "messageParameters" : {
+    "key" : "42",
+    "mapKeyDedupPolicy" : "\"spark.sql.mapKeyDedupPolicy\""
+  }
+}
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), map('new', 'key')) as 
transform_keys_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.INVALID_MAP_KEY_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "keyType" : "\"MAP<STRING, STRING>\"",
+    "sqlExpr" : "\"transform_keys(map(a, 1, b, 2), lambdafunction(map(new, 
key), namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 61,
+    "fragment" : "transform_keys(map('a', 1, 'b', 2), map('new', 'key'))"
+  } ]
+}
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), struct('key', 'value')) as 
transform_keys_struct_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.SparkRuntimeException
+{
+  "errorClass" : "DUPLICATED_MAP_KEY",
+  "sqlState" : "23505",
+  "messageParameters" : {
+    "key" : "[key,value]",
+    "mapKeyDedupPolicy" : "\"spark.sql.mapKeyDedupPolicy\""
+  }
+}
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), array('new_key')) as 
transform_keys_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.SparkRuntimeException
+{
+  "errorClass" : "DUPLICATED_MAP_KEY",
+  "sqlState" : "23505",
+  "messageParameters" : {
+    "key" : "[new_key]",
+    "mapKeyDedupPolicy" : "\"spark.sql.mapKeyDedupPolicy\""
+  }
+}
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), 999) as 
transform_values_int_literal
+-- !query schema
+struct<transform_values_int_literal:map<string,int>>
+-- !query output
+{"a":999,"b":999}
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), map('new', 'value')) as 
transform_values_map_literal
+-- !query schema
+struct<transform_values_map_literal:map<string,map<string,string>>>
+-- !query output
+{"a":{"new":"value"},"b":{"new":"value"}}
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), struct('val', 999)) as 
transform_values_struct_literal
+-- !query schema
+struct<transform_values_struct_literal:map<string,struct<col1:string,col2:int>>>
+-- !query output
+{"a":{"col1":"val","col2":999},"b":{"col1":"val","col2":999}}
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), array('new_value')) as 
transform_values_array_literal
+-- !query schema
+struct<transform_values_array_literal:map<string,array<string>>>
+-- !query output
+{"a":["new_value"],"b":["new_value"]}
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), 100) as zipwith_int_literal
+-- !query schema
+struct<zipwith_int_literal:array<int>>
+-- !query output
+[100,100,100]
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), map('merged', true)) as 
zipwith_map_literal
+-- !query schema
+struct<zipwith_map_literal:array<map<string,boolean>>>
+-- !query output
+[{"merged":true},{"merged":true},{"merged":true}]
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), struct('left', 1, 'right', 2)) 
as zipwith_struct_literal
+-- !query schema
+struct<zipwith_struct_literal:array<struct<col1:string,col2:int,col3:string,col4:int>>>
+-- !query output
+[{"col1":"left","col2":1,"col3":"right","col4":2},{"col1":"left","col2":1,"col3":"right","col4":2},{"col1":"left","col2":1,"col3":"right","col4":2}]
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), array('combined')) as 
zipwith_array_literal
+-- !query schema
+struct<zipwith_array_literal:array<array<string>>>
+-- !query output
+[["combined"],["combined"],["combined"]]
diff --git 
a/sql/core/src/test/resources/sql-tests/results/nonansi/higher-order-functions.sql.out
 
b/sql/core/src/test/resources/sql-tests/results/nonansi/higher-order-functions.sql.out
index 7bfc35a61e09..b16bbcda2eb5 100644
--- 
a/sql/core/src/test/resources/sql-tests/results/nonansi/higher-order-functions.sql.out
+++ 
b/sql/core/src/test/resources/sql-tests/results/nonansi/higher-order-functions.sql.out
@@ -348,3 +348,837 @@ select aggregate(split('abcdefgh',''), array(array('')), 
(acc, x) -> array(array
 struct<aggregate(split(abcdefgh, , -1), array(array()), 
lambdafunction(array(array(namedlambdavariable())), namedlambdavariable(), 
namedlambdavariable()), lambdafunction(namedlambdavariable(), 
namedlambdavariable())):array<array<string>>>
 -- !query output
 [["h"]]
+
+
+-- !query
+select aggregate(array(1, 2, 3), 0, 100) as aggregate_int_literal
+-- !query schema
+struct<aggregate_int_literal:int>
+-- !query output
+100
+
+
+-- !query
+select aggregate(array(1, 2, 3), map(), map('result', 999)) as 
aggregate_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(result, 999), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, INT>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"MAP<VOID, VOID>\"",
+    "sqlExpr" : "\"aggregate(array(1, 2, 3), map(), lambdafunction(map(result, 
999), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 59,
+    "fragment" : "aggregate(array(1, 2, 3), map(), map('result', 999))"
+  } ]
+}
+
+
+-- !query
+select aggregate(array(1, 2, 3), struct('init', 0), struct('final', 999)) as 
aggregate_struct_literal
+-- !query schema
+struct<aggregate_struct_literal:struct<col1:string,col2:int>>
+-- !query output
+{"col1":"final","col2":999}
+
+
+-- !query
+select aggregate(array(1, 2, 3), array(), array('result')) as 
aggregate_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(result), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<STRING>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"ARRAY<VOID>\"",
+    "sqlExpr" : "\"aggregate(array(1, 2, 3), array(), 
lambdafunction(array(result), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 58,
+    "fragment" : "aggregate(array(1, 2, 3), array(), array('result'))"
+  } ]
+}
+
+
+-- !query
+select array_sort(array(3, 1, 2), 1) as array_sort_int_literal
+-- !query schema
+struct<array_sort_int_literal:array<int>>
+-- !query output
+[3,1,2]
+
+
+-- !query
+select array_sort(array(3, 1, 2), map('compare', 0)) as array_sort_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_RETURN_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "actualType" : "\"MAP<STRING, INT>\"",
+    "expectedType" : "\"INT\"",
+    "functionName" : "`lambdafunction`",
+    "sqlExpr" : "\"array_sort(array(3, 1, 2), lambdafunction(map(compare, 0), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 52,
+    "fragment" : "array_sort(array(3, 1, 2), map('compare', 0))"
+  } ]
+}
+
+
+-- !query
+select array_sort(array(3, 1, 2), struct('result', 0)) as 
array_sort_struct_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_RETURN_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "actualType" : "\"STRUCT<col1: STRING NOT NULL, col2: INT NOT NULL>\"",
+    "expectedType" : "\"INT\"",
+    "functionName" : "`lambdafunction`",
+    "sqlExpr" : "\"array_sort(array(3, 1, 2), lambdafunction(struct(result, 
0), namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 54,
+    "fragment" : "array_sort(array(3, 1, 2), struct('result', 0))"
+  } ]
+}
+
+
+-- !query
+select array_sort(array(3, 1, 2), array(0)) as array_sort_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_RETURN_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "actualType" : "\"ARRAY<INT>\"",
+    "expectedType" : "\"INT\"",
+    "functionName" : "`lambdafunction`",
+    "sqlExpr" : "\"array_sort(array(3, 1, 2), lambdafunction(array(0), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 43,
+    "fragment" : "array_sort(array(3, 1, 2), array(0))"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), 1) as exists_int_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(1, 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 32,
+    "fragment" : "exists(array(1, 2, 3), 1)"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), map('found', true)) as exists_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(found, true), namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(map(found, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 49,
+    "fragment" : "exists(array(1, 2, 3), map('found', true))"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), struct('exists', true)) as exists_struct_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(exists, true), 
namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(struct(exists, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 53,
+    "fragment" : "exists(array(1, 2, 3), struct('exists', true))"
+  } ]
+}
+
+
+-- !query
+select exists(array(1, 2, 3), array(true)) as exists_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true), namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"exists(array(1, 2, 3), lambdafunction(array(true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 42,
+    "fragment" : "exists(array(1, 2, 3), array(true))"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), 1) as filter_int_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(1, 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 32,
+    "fragment" : "filter(array(1, 2, 3), 1)"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), map('key', 'value')) as filter_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(key, value), namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, STRING>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(map(key, value), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 50,
+    "fragment" : "filter(array(1, 2, 3), map('key', 'value'))"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), struct('valid', true)) as filter_struct_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(valid, true), 
namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(struct(valid, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 52,
+    "fragment" : "filter(array(1, 2, 3), struct('valid', true))"
+  } ]
+}
+
+
+-- !query
+select filter(array(1, 2, 3), array(true, false)) as filter_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true, false), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"filter(array(1, 2, 3), lambdafunction(array(true, false), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 49,
+    "fragment" : "filter(array(1, 2, 3), array(true, false))"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), 1) as forall_int_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(1, 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 32,
+    "fragment" : "forall(array(1, 2, 3), 1)"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), map('all', true)) as forall_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(all, true), namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(map(all, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 47,
+    "fragment" : "forall(array(1, 2, 3), map('all', true))"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), struct('all', true)) as forall_struct_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(all, true), 
namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(struct(all, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 50,
+    "fragment" : "forall(array(1, 2, 3), struct('all', true))"
+  } ]
+}
+
+
+-- !query
+select forall(array(1, 2, 3), array(true, true)) as forall_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true, true), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"forall(array(1, 2, 3), lambdafunction(array(true, true), 
namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 48,
+    "fragment" : "forall(array(1, 2, 3), array(true, true))"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), 1) as map_filter_int_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(1, namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"INT\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(1, 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 41,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), 1)"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), map('keep', true)) as 
map_filter_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(keep, true), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(map(keep, true), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 57,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), map('keep', true))"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), struct('filter', true)) as 
map_filter_struct_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(struct(filter, true), 
namedlambdavariable(), namedlambdavariable())\"",
+    "inputType" : "\"STRUCT<col1: STRING NOT NULL, col2: BOOLEAN NOT NULL>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(struct(filter, 
true), namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 62,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), struct('filter', true))"
+  } ]
+}
+
+
+-- !query
+select map_filter(map('a', 1, 'b', 2), array(true)) as map_filter_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(true), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<BOOLEAN>\"",
+    "paramIndex" : "second",
+    "requiredType" : "\"BOOLEAN\"",
+    "sqlExpr" : "\"map_filter(map(a, 1, b, 2), lambdafunction(array(true), 
namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 51,
+    "fragment" : "map_filter(map('a', 1, 'b', 2), array(true))"
+  } ]
+}
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), 100) as map_zipwith_int_literal
+-- !query schema
+struct<map_zipwith_int_literal:map<string,int>>
+-- !query output
+{"a":100}
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), map('merged', true)) as 
map_zipwith_map_literal
+-- !query schema
+struct<map_zipwith_map_literal:map<string,map<string,boolean>>>
+-- !query output
+{"a":{"merged":true}}
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), struct('left', 1, 'right', 10)) 
as map_zipwith_struct_literal
+-- !query schema
+struct<map_zipwith_struct_literal:map<string,struct<col1:string,col2:int,col3:string,col4:int>>>
+-- !query output
+{"a":{"col1":"left","col2":1,"col3":"right","col4":10}}
+
+
+-- !query
+select map_zip_with(map('a', 1), map('a', 10), array('combined')) as 
map_zipwith_array_literal
+-- !query schema
+struct<map_zipwith_array_literal:map<string,array<string>>>
+-- !query output
+{"a":["combined"]}
+
+
+-- !query
+select reduce(array(1, 2, 3), 0, 100) as reduce_int_literal
+-- !query schema
+struct<reduce_int_literal:int>
+-- !query output
+100
+
+
+-- !query
+select reduce(array(1, 2, 3), map(), map('result', 999)) as reduce_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(map(result, 999), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"MAP<STRING, INT>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"MAP<VOID, VOID>\"",
+    "sqlExpr" : "\"reduce(array(1, 2, 3), map(), lambdafunction(map(result, 
999), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 56,
+    "fragment" : "reduce(array(1, 2, 3), map(), map('result', 999))"
+  } ]
+}
+
+
+-- !query
+select reduce(array(1, 2, 3), struct('init', 0), struct('final', 999)) as 
reduce_struct_literal
+-- !query schema
+struct<reduce_struct_literal:struct<col1:string,col2:int>>
+-- !query output
+{"col1":"final","col2":999}
+
+
+-- !query
+select reduce(array(1, 2, 3), array(), array('result')) as reduce_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.UNEXPECTED_INPUT_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "inputSql" : "\"lambdafunction(array(result), namedlambdavariable(), 
namedlambdavariable())\"",
+    "inputType" : "\"ARRAY<STRING>\"",
+    "paramIndex" : "third",
+    "requiredType" : "\"ARRAY<VOID>\"",
+    "sqlExpr" : "\"reduce(array(1, 2, 3), array(), 
lambdafunction(array(result), namedlambdavariable(), namedlambdavariable()), 
lambdafunction(namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 55,
+    "fragment" : "reduce(array(1, 2, 3), array(), array('result'))"
+  } ]
+}
+
+
+-- !query
+select transform(array(1, 2, 3), 42) as transform_int_literal
+-- !query schema
+struct<transform_int_literal:array<int>>
+-- !query output
+[42,42,42]
+
+
+-- !query
+select transform(array(1, 2, 3), map('key', 'value')) as transform_map_literal
+-- !query schema
+struct<transform_map_literal:array<map<string,string>>>
+-- !query output
+[{"key":"value"},{"key":"value"},{"key":"value"}]
+
+
+-- !query
+select transform(array(1, 2, 3), struct('id', 99, 'name', 'test')) as 
transform_struct_literal
+-- !query schema
+struct<transform_struct_literal:array<struct<col1:string,col2:int,col3:string,col4:string>>>
+-- !query output
+[{"col1":"id","col2":99,"col3":"name","col4":"test"},{"col1":"id","col2":99,"col3":"name","col4":"test"},{"col1":"id","col2":99,"col3":"name","col4":"test"}]
+
+
+-- !query
+select transform(array(1, 2, 3), array('a', 'b')) as transform_array_literal
+-- !query schema
+struct<transform_array_literal:array<array<string>>>
+-- !query output
+[["a","b"],["a","b"],["a","b"]]
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), 42) as transform_keys_int_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.SparkRuntimeException
+{
+  "errorClass" : "DUPLICATED_MAP_KEY",
+  "sqlState" : "23505",
+  "messageParameters" : {
+    "key" : "42",
+    "mapKeyDedupPolicy" : "\"spark.sql.mapKeyDedupPolicy\""
+  }
+}
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), map('new', 'key')) as 
transform_keys_map_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.sql.catalyst.ExtendedAnalysisException
+{
+  "errorClass" : "DATATYPE_MISMATCH.INVALID_MAP_KEY_TYPE",
+  "sqlState" : "42K09",
+  "messageParameters" : {
+    "keyType" : "\"MAP<STRING, STRING>\"",
+    "sqlExpr" : "\"transform_keys(map(a, 1, b, 2), lambdafunction(map(new, 
key), namedlambdavariable(), namedlambdavariable()))\""
+  },
+  "queryContext" : [ {
+    "objectType" : "",
+    "objectName" : "",
+    "startIndex" : 8,
+    "stopIndex" : 61,
+    "fragment" : "transform_keys(map('a', 1, 'b', 2), map('new', 'key'))"
+  } ]
+}
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), struct('key', 'value')) as 
transform_keys_struct_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.SparkRuntimeException
+{
+  "errorClass" : "DUPLICATED_MAP_KEY",
+  "sqlState" : "23505",
+  "messageParameters" : {
+    "key" : "[key,value]",
+    "mapKeyDedupPolicy" : "\"spark.sql.mapKeyDedupPolicy\""
+  }
+}
+
+
+-- !query
+select transform_keys(map('a', 1, 'b', 2), array('new_key')) as 
transform_keys_array_literal
+-- !query schema
+struct<>
+-- !query output
+org.apache.spark.SparkRuntimeException
+{
+  "errorClass" : "DUPLICATED_MAP_KEY",
+  "sqlState" : "23505",
+  "messageParameters" : {
+    "key" : "[new_key]",
+    "mapKeyDedupPolicy" : "\"spark.sql.mapKeyDedupPolicy\""
+  }
+}
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), 999) as 
transform_values_int_literal
+-- !query schema
+struct<transform_values_int_literal:map<string,int>>
+-- !query output
+{"a":999,"b":999}
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), map('new', 'value')) as 
transform_values_map_literal
+-- !query schema
+struct<transform_values_map_literal:map<string,map<string,string>>>
+-- !query output
+{"a":{"new":"value"},"b":{"new":"value"}}
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), struct('val', 999)) as 
transform_values_struct_literal
+-- !query schema
+struct<transform_values_struct_literal:map<string,struct<col1:string,col2:int>>>
+-- !query output
+{"a":{"col1":"val","col2":999},"b":{"col1":"val","col2":999}}
+
+
+-- !query
+select transform_values(map('a', 1, 'b', 2), array('new_value')) as 
transform_values_array_literal
+-- !query schema
+struct<transform_values_array_literal:map<string,array<string>>>
+-- !query output
+{"a":["new_value"],"b":["new_value"]}
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), 100) as zipwith_int_literal
+-- !query schema
+struct<zipwith_int_literal:array<int>>
+-- !query output
+[100,100,100]
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), map('merged', true)) as 
zipwith_map_literal
+-- !query schema
+struct<zipwith_map_literal:array<map<string,boolean>>>
+-- !query output
+[{"merged":true},{"merged":true},{"merged":true}]
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), struct('left', 1, 'right', 2)) 
as zipwith_struct_literal
+-- !query schema
+struct<zipwith_struct_literal:array<struct<col1:string,col2:int,col3:string,col4:int>>>
+-- !query output
+[{"col1":"left","col2":1,"col3":"right","col4":2},{"col1":"left","col2":1,"col3":"right","col4":2},{"col1":"left","col2":1,"col3":"right","col4":2}]
+
+
+-- !query
+select zip_with(array(1, 2, 3), array(4, 5, 6), array('combined')) as 
zipwith_array_literal
+-- !query schema
+struct<zipwith_array_literal:array<array<string>>>
+-- !query output
+[["combined"],["combined"],["combined"]]


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to