npawar commented on a change in pull request #7016:
URL: https://github.com/apache/incubator-pinot/pull/7016#discussion_r645765085



##########
File path: 
pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/RealtimeKinesisIntegrationTest.java
##########
@@ -0,0 +1,448 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.integration.tests;
+
+import cloud.localstack.Localstack;
+import cloud.localstack.docker.annotation.LocalstackDockerAnnotationProcessor;
+import cloud.localstack.docker.annotation.LocalstackDockerConfiguration;
+import cloud.localstack.docker.annotation.LocalstackDockerProperties;
+import cloud.localstack.docker.command.Command;
+import cloud.localstack.docker.exception.LocalstackDockerException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.JsonNodeType;
+import com.google.common.base.Function;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import javax.activation.UnsupportedDataTypeException;
+import javax.annotation.Nullable;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.pinot.client.Request;
+import org.apache.pinot.client.ResultSet;
+import org.apache.pinot.common.utils.StringUtil;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConfig;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConsumerFactory;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.stream.StreamConfig;
+import org.apache.pinot.spi.stream.StreamConfigProperties;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.apache.pinot.util.TestUtils;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.http.SdkHttpConfigurationOption;
+import software.amazon.awssdk.http.apache.ApacheSdkHttpService;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.kinesis.KinesisClient;
+import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordResponse;
+import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
+import software.amazon.awssdk.utils.AttributeMap;
+
+import static org.awaitility.Awaitility.await;
+
+
+@LocalstackDockerProperties(services = {"kinesis", "dynamodb"})
+public class RealtimeKinesisIntegrationTest extends 
BaseClusterIntegrationTestSet {
+  private static final LocalstackDockerAnnotationProcessor PROCESSOR = new 
LocalstackDockerAnnotationProcessor();
+  private static final String STREAM_NAME = "kinesis-test";
+  private static final String STREAM_TYPE = "kinesis";
+  public static final int MAX_RECORDS_TO_FETCH = 2000;
+
+  public static final String REGION = "us-east-1";
+  public static final String LOCALSTACK_KINESIS_ENDPOINT = 
"http://localhost:4566";
+  public static final int NUM_SHARDS = 10;
+
+  public static final String SCHEMA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.schema";
+  public static final String DATA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.json";
+
+  private final Localstack localstackDocker = Localstack.INSTANCE;
+
+  private static KinesisClient kinesisClient = null;
+
+  private long totalRecordsPushedInStream = 0;
+
+  List<String> h2FieldNameAndTypes = new ArrayList<>();
+
+  @BeforeClass
+  public void setUp()
+      throws Exception {
+    TestUtils.ensureDirectoriesExistAndEmpty(_tempDir);
+
+    // Start the Pinot cluster
+    startZk();
+    startController();
+    startBroker();
+    startServer();
+
+    // Start Kinesis
+    startKinesis();
+
+    // Create and upload the schema and table config
+    addSchema(createKinesisSchema());
+    addTableConfig(createKinesisTableConfig());
+
+    createH2ConnectionAndTable();
+
+    // Push data into Kinesis
+    publishRecordsToKinesis();
+
+    // Wait for all documents loaded
+    waitForAllDocsLoadedKinesis(60_000L);
+  }
+
+  public Schema createKinesisSchema()
+      throws Exception {
+    URL resourceUrl = 
BaseClusterIntegrationTest.class.getClassLoader().getResource(SCHEMA_FILE_PATH);
+    Assert.assertNotNull(resourceUrl);
+    return Schema.fromFile(new File(resourceUrl.getFile()));
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs)
+      throws Exception {
+    waitForAllDocsLoadedKinesis(timeoutMs, true);
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs, boolean 
raiseError) {
+    TestUtils.waitForCondition(new Function<Void, Boolean>() {
+      @Nullable
+      @Override
+      public Boolean apply(@Nullable Void aVoid) {
+        try {
+          return getCurrentCountStarResult() >= totalRecordsPushedInStream;
+        } catch (Exception e) {
+          return null;
+        }
+      }
+    }, 1000L, timeoutMs, "Failed to load " + totalRecordsPushedInStream + " 
documents", raiseError);
+  }
+
+  public TableConfig createKinesisTableConfig() {
+    return new 
TableConfigBuilder(TableType.REALTIME).setTableName(getTableName()).setSchemaName(getTableName())
+        
.setTimeColumnName("DaysSinceEpoch").setFieldConfigList(getFieldConfigs()).setNumReplicas(getNumReplicas())
+        
.setSegmentVersion(getSegmentVersion()).setLoadMode(getLoadMode()).setTaskConfig(getTaskConfig())
+        
.setBrokerTenant(getBrokerTenant()).setServerTenant(getServerTenant()).setIngestionConfig(getIngestionConfig())
+        
.setLLC(true).setStreamConfigs(createKinesisStreamConfig()).setNullHandlingEnabled(getNullHandlingEnabled())
+        .build();
+  }
+
+  public Map<String, String> createKinesisStreamConfig() {
+    Map<String, String> streamConfigMap = new HashMap<>();
+    String streamType = "kinesis";
+    streamConfigMap.put(StreamConfigProperties.STREAM_TYPE, streamType);
+
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_TOPIC_NAME),
+            STREAM_NAME);
+
+    streamConfigMap.put(
+        StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_FETCH_TIMEOUT_MILLIS),
+        "30000");
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_CONSUMER_TYPES),
+            StreamConfig.ConsumerType.LOWLEVEL.toString());
+    streamConfigMap.put(StreamConfigProperties
+            .constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_CONSUMER_FACTORY_CLASS),
+        KinesisConsumerFactory.class.getName());
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_DECODER_CLASS),
+            "org.apache.pinot.plugin.stream.kafka.KafkaJSONMessageDecoder");
+    streamConfigMap.put(KinesisConfig.REGION, REGION);
+    streamConfigMap.put(KinesisConfig.MAX_RECORDS_TO_FETCH, 
String.valueOf(MAX_RECORDS_TO_FETCH));
+    streamConfigMap.put(KinesisConfig.SHARD_ITERATOR_TYPE, 
ShardIteratorType.AT_SEQUENCE_NUMBER.toString());
+    streamConfigMap.put(KinesisConfig.ENDPOINT, LOCALSTACK_KINESIS_ENDPOINT);
+    streamConfigMap.put(KinesisConfig.ACCESS_KEY, 
getLocalAWSCredentials().resolveCredentials().accessKeyId());
+    streamConfigMap.put(KinesisConfig.SECRET_KEY, 
getLocalAWSCredentials().resolveCredentials().secretAccessKey());
+    streamConfigMap.put(StreamConfigProperties.SEGMENT_FLUSH_THRESHOLD_ROWS, 
Integer.toString(5000));
+    streamConfigMap.put(StreamConfigProperties
+        .constructStreamProperty(streamType, 
StreamConfigProperties.STREAM_CONSUMER_OFFSET_CRITERIA), "smallest");
+    return streamConfigMap;
+  }
+
+  public void startKinesis()
+      throws Exception {
+    try {
+      final LocalstackDockerConfiguration dockerConfig = 
PROCESSOR.process(this.getClass());
+      localstackDocker.startup(dockerConfig);
+    } catch (LocalstackDockerException e) {
+      StopAllLocalstackDockerCommand stopAllLocalstackDockerCommand = new 
StopAllLocalstackDockerCommand();
+      stopAllLocalstackDockerCommand.execute();
+
+      final LocalstackDockerConfiguration dockerConfig = 
PROCESSOR.process(this.getClass());
+      localstackDocker.startup(dockerConfig);
+    }
+
+    kinesisClient = KinesisClient.builder().httpClient(new 
ApacheSdkHttpService().createHttpClientBuilder()
+        .buildWithDefaults(
+            
AttributeMap.builder().put(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, 
Boolean.TRUE).build()))
+        
.credentialsProvider(getLocalAWSCredentials()).region(Region.of(REGION))
+        .endpointOverride(new URI(LOCALSTACK_KINESIS_ENDPOINT)).build();
+
+    
kinesisClient.createStream(CreateStreamRequest.builder().streamName(STREAM_NAME).shardCount(NUM_SHARDS).build());
+    await().until(() -> 
kinesisClient.describeStream(DescribeStreamRequest.builder().streamName(STREAM_NAME).build())

Review comment:
       It looks like we're introducing the awaitility dependency only for this one 
check. Could we just use `TestUtils.waitForCondition` instead?

##########
File path: 
pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/RealtimeKinesisIntegrationTest.java
##########
@@ -0,0 +1,448 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.integration.tests;
+
+import cloud.localstack.Localstack;
+import cloud.localstack.docker.annotation.LocalstackDockerAnnotationProcessor;
+import cloud.localstack.docker.annotation.LocalstackDockerConfiguration;
+import cloud.localstack.docker.annotation.LocalstackDockerProperties;
+import cloud.localstack.docker.command.Command;
+import cloud.localstack.docker.exception.LocalstackDockerException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.JsonNodeType;
+import com.google.common.base.Function;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import javax.activation.UnsupportedDataTypeException;
+import javax.annotation.Nullable;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.pinot.client.Request;
+import org.apache.pinot.client.ResultSet;
+import org.apache.pinot.common.utils.StringUtil;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConfig;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConsumerFactory;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.stream.StreamConfig;
+import org.apache.pinot.spi.stream.StreamConfigProperties;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.apache.pinot.util.TestUtils;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.http.SdkHttpConfigurationOption;
+import software.amazon.awssdk.http.apache.ApacheSdkHttpService;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.kinesis.KinesisClient;
+import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordResponse;
+import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
+import software.amazon.awssdk.utils.AttributeMap;
+
+import static org.awaitility.Awaitility.await;
+
+
+@LocalstackDockerProperties(services = {"kinesis", "dynamodb"})
+public class RealtimeKinesisIntegrationTest extends 
BaseClusterIntegrationTestSet {
+  private static final LocalstackDockerAnnotationProcessor PROCESSOR = new 
LocalstackDockerAnnotationProcessor();
+  private static final String STREAM_NAME = "kinesis-test";
+  private static final String STREAM_TYPE = "kinesis";
+  public static final int MAX_RECORDS_TO_FETCH = 2000;
+
+  public static final String REGION = "us-east-1";
+  public static final String LOCALSTACK_KINESIS_ENDPOINT = 
"http://localhost:4566";
+  public static final int NUM_SHARDS = 10;
+
+  public static final String SCHEMA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.schema";

Review comment:
       Can you add a comment here explaining why you had to make a "reduced" 
version of the schema/data, instead of directly using the original files as is?

##########
File path: 
pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/main/java/org/apache/pinot/plugin/stream/kinesis/KinesisConnectionHandler.java
##########
@@ -73,17 +79,26 @@ public KinesisConnectionHandler(KinesisConfig 
kinesisConfig, KinesisClient kines
    */
   public void createConnection() {
     if (_kinesisClient == null) {
+      KinesisClientBuilder kinesisClientBuilder;
       if (StringUtils.isNotBlank(_accessKey) && 
StringUtils.isNotBlank(_secretKey)) {
         AwsBasicCredentials awsBasicCredentials = 
AwsBasicCredentials.create(_accessKey, _secretKey);
-        _kinesisClient = KinesisClient.builder().region(Region.of(_region))
+        kinesisClientBuilder = 
KinesisClient.builder().region(Region.of(_region))
             
.credentialsProvider(StaticCredentialsProvider.create(awsBasicCredentials))
-            .httpClientBuilder(new 
ApacheSdkHttpService().createHttpClientBuilder())
-            .build();
+            .httpClientBuilder(new 
ApacheSdkHttpService().createHttpClientBuilder());
       } else {
-        _kinesisClient =
-            
KinesisClient.builder().region(Region.of(_region)).credentialsProvider(DefaultCredentialsProvider.create())
-                .build();
+        kinesisClientBuilder =
+            
KinesisClient.builder().region(Region.of(_region)).credentialsProvider(DefaultCredentialsProvider.create());
       }
+
+      if(StringUtils.isNotBlank(_endpoint)){

Review comment:
       nit: formatting seems off in this file — e.g. `if(StringUtils.isNotBlank(_endpoint)){` 
is missing spaces around the condition and brace; please run the Pinot code formatter.

##########
File path: 
pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/RealtimeKinesisIntegrationTest.java
##########
@@ -0,0 +1,448 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.integration.tests;
+
+import cloud.localstack.Localstack;
+import cloud.localstack.docker.annotation.LocalstackDockerAnnotationProcessor;
+import cloud.localstack.docker.annotation.LocalstackDockerConfiguration;
+import cloud.localstack.docker.annotation.LocalstackDockerProperties;
+import cloud.localstack.docker.command.Command;
+import cloud.localstack.docker.exception.LocalstackDockerException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.JsonNodeType;
+import com.google.common.base.Function;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import javax.activation.UnsupportedDataTypeException;
+import javax.annotation.Nullable;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.pinot.client.Request;
+import org.apache.pinot.client.ResultSet;
+import org.apache.pinot.common.utils.StringUtil;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConfig;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConsumerFactory;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.stream.StreamConfig;
+import org.apache.pinot.spi.stream.StreamConfigProperties;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.apache.pinot.util.TestUtils;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.http.SdkHttpConfigurationOption;
+import software.amazon.awssdk.http.apache.ApacheSdkHttpService;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.kinesis.KinesisClient;
+import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordResponse;
+import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
+import software.amazon.awssdk.utils.AttributeMap;
+
+import static org.awaitility.Awaitility.await;
+
+
+@LocalstackDockerProperties(services = {"kinesis", "dynamodb"})

Review comment:
       Why does this test need the "dynamodb" service? If the localstack Kinesis 
emulation requires it internally, please add a comment saying so.

##########
File path: 
pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/RealtimeKinesisIntegrationTest.java
##########
@@ -0,0 +1,448 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.integration.tests;
+
+import cloud.localstack.Localstack;
+import cloud.localstack.docker.annotation.LocalstackDockerAnnotationProcessor;
+import cloud.localstack.docker.annotation.LocalstackDockerConfiguration;
+import cloud.localstack.docker.annotation.LocalstackDockerProperties;
+import cloud.localstack.docker.command.Command;
+import cloud.localstack.docker.exception.LocalstackDockerException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.JsonNodeType;
+import com.google.common.base.Function;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import javax.activation.UnsupportedDataTypeException;
+import javax.annotation.Nullable;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.pinot.client.Request;
+import org.apache.pinot.client.ResultSet;
+import org.apache.pinot.common.utils.StringUtil;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConfig;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConsumerFactory;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.stream.StreamConfig;
+import org.apache.pinot.spi.stream.StreamConfigProperties;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.apache.pinot.util.TestUtils;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.http.SdkHttpConfigurationOption;
+import software.amazon.awssdk.http.apache.ApacheSdkHttpService;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.kinesis.KinesisClient;
+import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordResponse;
+import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
+import software.amazon.awssdk.utils.AttributeMap;
+
+import static org.awaitility.Awaitility.await;
+
+
+@LocalstackDockerProperties(services = {"kinesis", "dynamodb"})
+public class RealtimeKinesisIntegrationTest extends 
BaseClusterIntegrationTestSet {
+  private static final LocalstackDockerAnnotationProcessor PROCESSOR = new 
LocalstackDockerAnnotationProcessor();
+  private static final String STREAM_NAME = "kinesis-test";
+  private static final String STREAM_TYPE = "kinesis";
+  public static final int MAX_RECORDS_TO_FETCH = 2000;
+
+  public static final String REGION = "us-east-1";
+  public static final String LOCALSTACK_KINESIS_ENDPOINT = 
"http://localhost:4566";
+  public static final int NUM_SHARDS = 10;
+
+  public static final String SCHEMA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.schema";
+  public static final String DATA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.json";
+
+  private final Localstack localstackDocker = Localstack.INSTANCE;
+
+  private static KinesisClient kinesisClient = null;
+
+  private long totalRecordsPushedInStream = 0;
+
+  List<String> h2FieldNameAndTypes = new ArrayList<>();
+
+  @BeforeClass
+  public void setUp()
+      throws Exception {
+    TestUtils.ensureDirectoriesExistAndEmpty(_tempDir);
+
+    // Start the Pinot cluster
+    startZk();
+    startController();
+    startBroker();
+    startServer();
+
+    // Start Kinesis
+    startKinesis();
+
+    // Create and upload the schema and table config
+    addSchema(createKinesisSchema());
+    addTableConfig(createKinesisTableConfig());
+
+    createH2ConnectionAndTable();
+
+    // Push data into Kinesis
+    publishRecordsToKinesis();
+
+    // Wait for all documents loaded
+    waitForAllDocsLoadedKinesis(60_000L);
+  }
+
+  public Schema createKinesisSchema()
+      throws Exception {
+    URL resourceUrl = 
BaseClusterIntegrationTest.class.getClassLoader().getResource(SCHEMA_FILE_PATH);
+    Assert.assertNotNull(resourceUrl);
+    return Schema.fromFile(new File(resourceUrl.getFile()));
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs)
+      throws Exception {
+    waitForAllDocsLoadedKinesis(timeoutMs, true);
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs, boolean 
raiseError) {
+    TestUtils.waitForCondition(new Function<Void, Boolean>() {
+      @Nullable
+      @Override
+      public Boolean apply(@Nullable Void aVoid) {
+        try {
+          return getCurrentCountStarResult() >= totalRecordsPushedInStream;
+        } catch (Exception e) {
+          return null;
+        }
+      }
+    }, 1000L, timeoutMs, "Failed to load " + totalRecordsPushedInStream + " 
documents", raiseError);
+  }
+
+  public TableConfig createKinesisTableConfig() {
+    return new 
TableConfigBuilder(TableType.REALTIME).setTableName(getTableName()).setSchemaName(getTableName())
+        
.setTimeColumnName("DaysSinceEpoch").setFieldConfigList(getFieldConfigs()).setNumReplicas(getNumReplicas())
+        
.setSegmentVersion(getSegmentVersion()).setLoadMode(getLoadMode()).setTaskConfig(getTaskConfig())
+        
.setBrokerTenant(getBrokerTenant()).setServerTenant(getServerTenant()).setIngestionConfig(getIngestionConfig())
+        
.setLLC(true).setStreamConfigs(createKinesisStreamConfig()).setNullHandlingEnabled(getNullHandlingEnabled())
+        .build();
+  }
+
+  public Map<String, String> createKinesisStreamConfig() {
+    Map<String, String> streamConfigMap = new HashMap<>();
+    String streamType = "kinesis";
+    streamConfigMap.put(StreamConfigProperties.STREAM_TYPE, streamType);
+
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_TOPIC_NAME),
+            STREAM_NAME);
+
+    streamConfigMap.put(
+        StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_FETCH_TIMEOUT_MILLIS),
+        "30000");
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_CONSUMER_TYPES),
+            StreamConfig.ConsumerType.LOWLEVEL.toString());
+    streamConfigMap.put(StreamConfigProperties
+            .constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_CONSUMER_FACTORY_CLASS),
+        KinesisConsumerFactory.class.getName());
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_DECODER_CLASS),
+            "org.apache.pinot.plugin.stream.kafka.KafkaJSONMessageDecoder");
+    streamConfigMap.put(KinesisConfig.REGION, REGION);
+    streamConfigMap.put(KinesisConfig.MAX_RECORDS_TO_FETCH, 
String.valueOf(MAX_RECORDS_TO_FETCH));
+    streamConfigMap.put(KinesisConfig.SHARD_ITERATOR_TYPE, 
ShardIteratorType.AT_SEQUENCE_NUMBER.toString());
+    streamConfigMap.put(KinesisConfig.ENDPOINT, LOCALSTACK_KINESIS_ENDPOINT);
+    streamConfigMap.put(KinesisConfig.ACCESS_KEY, 
getLocalAWSCredentials().resolveCredentials().accessKeyId());
+    streamConfigMap.put(KinesisConfig.SECRET_KEY, 
getLocalAWSCredentials().resolveCredentials().secretAccessKey());
+    streamConfigMap.put(StreamConfigProperties.SEGMENT_FLUSH_THRESHOLD_ROWS, 
Integer.toString(5000));
+    streamConfigMap.put(StreamConfigProperties
+        .constructStreamProperty(streamType, 
StreamConfigProperties.STREAM_CONSUMER_OFFSET_CRITERIA), "smallest");
+    return streamConfigMap;
+  }
+
+  public void startKinesis()
+      throws Exception {
+    try {
+      final LocalstackDockerConfiguration dockerConfig = 
PROCESSOR.process(this.getClass());
+      localstackDocker.startup(dockerConfig);
+    } catch (LocalstackDockerException e) {
+      StopAllLocalstackDockerCommand stopAllLocalstackDockerCommand = new 
StopAllLocalstackDockerCommand();
+      stopAllLocalstackDockerCommand.execute();
+
+      final LocalstackDockerConfiguration dockerConfig = 
PROCESSOR.process(this.getClass());
+      localstackDocker.startup(dockerConfig);
+    }
+
+    kinesisClient = KinesisClient.builder().httpClient(new 
ApacheSdkHttpService().createHttpClientBuilder()
+        .buildWithDefaults(
+            
AttributeMap.builder().put(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, 
Boolean.TRUE).build()))
+        
.credentialsProvider(getLocalAWSCredentials()).region(Region.of(REGION))
+        .endpointOverride(new URI(LOCALSTACK_KINESIS_ENDPOINT)).build();
+
+    
kinesisClient.createStream(CreateStreamRequest.builder().streamName(STREAM_NAME).shardCount(NUM_SHARDS).build());
+    await().until(() -> 
kinesisClient.describeStream(DescribeStreamRequest.builder().streamName(STREAM_NAME).build())
+        .streamDescription().streamStatusAsString().equals("ACTIVE"));
+  }
+
+  public void stopKinesis() {
+    if (localstackDocker.isRunning()) {
+      localstackDocker.stop();
+    }
+  }
+
+  private void publishRecordsToKinesis() {
+    try {
+      StringBuilder params = new StringBuilder("?");
+      for (int i = 0; i < h2FieldNameAndTypes.size() - 1; i++) {
+        params.append(",?");
+      }
+      PreparedStatement h2Statement =
+          _h2Connection.prepareStatement("INSERT INTO " + getTableName() + " 
VALUES (" + params.toString() + ")");
+
+      InputStream inputStream =
+          
RealtimeKinesisIntegrationTest.class.getClassLoader().getResourceAsStream(DATA_FILE_PATH);
+
+      try (BufferedReader br = new BufferedReader(new 
InputStreamReader(inputStream, StandardCharsets.UTF_8))) {
+        String line;
+        while ((line = br.readLine()) != null) {
+          JsonNode data = JsonUtils.stringToJsonNode(line);
+
+          PutRecordRequest putRecordRequest =
+              
PutRecordRequest.builder().streamName(STREAM_NAME).data(SdkBytes.fromUtf8String(line))
+                  .partitionKey(data.get("Origin").textValue()).build();
+          PutRecordResponse putRecordResponse = 
kinesisClient.putRecord(putRecordRequest);
+          if (putRecordResponse.sdkHttpResponse().statusCode() == 200) {
+            if (StringUtils.isNotBlank(putRecordResponse.sequenceNumber()) && 
StringUtils
+                .isNotBlank(putRecordResponse.shardId())) {
+              totalRecordsPushedInStream++;
+
+              int fieldIndex = 1;
+              for (String fieldNameAndDatatype : h2FieldNameAndTypes) {
+                String[] fieldNameAndDatatypeList = 
fieldNameAndDatatype.split(" ");
+                String fieldName = fieldNameAndDatatypeList[0];
+                String h2DataType = fieldNameAndDatatypeList[1];
+                switch (h2DataType) {
+                  case "int": {
+                    h2Statement.setObject(fieldIndex++, 
data.get(fieldName).intValue());
+                    break;
+                  }
+                  case "varchar(128)": {
+                    h2Statement.setObject(fieldIndex++, 
data.get(fieldName).textValue());
+                    break;
+                  }
+                  default: {

Review comment:
       nit: remove the empty `default` block? If an unexpected type should be an 
error, consider throwing an exception there instead.

##########
File path: 
pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/RealtimeKinesisIntegrationTest.java
##########
@@ -0,0 +1,448 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.integration.tests;
+
+import cloud.localstack.Localstack;
+import cloud.localstack.docker.annotation.LocalstackDockerAnnotationProcessor;
+import cloud.localstack.docker.annotation.LocalstackDockerConfiguration;
+import cloud.localstack.docker.annotation.LocalstackDockerProperties;
+import cloud.localstack.docker.command.Command;
+import cloud.localstack.docker.exception.LocalstackDockerException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.JsonNodeType;
+import com.google.common.base.Function;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import javax.activation.UnsupportedDataTypeException;
+import javax.annotation.Nullable;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.pinot.client.Request;
+import org.apache.pinot.client.ResultSet;
+import org.apache.pinot.common.utils.StringUtil;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConfig;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConsumerFactory;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.stream.StreamConfig;
+import org.apache.pinot.spi.stream.StreamConfigProperties;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.apache.pinot.util.TestUtils;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.http.SdkHttpConfigurationOption;
+import software.amazon.awssdk.http.apache.ApacheSdkHttpService;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.kinesis.KinesisClient;
+import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordResponse;
+import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
+import software.amazon.awssdk.utils.AttributeMap;
+
+import static org.awaitility.Awaitility.await;
+
+
+@LocalstackDockerProperties(services = {"kinesis", "dynamodb"})
+public class RealtimeKinesisIntegrationTest extends 
BaseClusterIntegrationTestSet {
+  private static final LocalstackDockerAnnotationProcessor PROCESSOR = new 
LocalstackDockerAnnotationProcessor();
+  private static final String STREAM_NAME = "kinesis-test";
+  private static final String STREAM_TYPE = "kinesis";
+  public static final int MAX_RECORDS_TO_FETCH = 2000;
+
+  public static final String REGION = "us-east-1";
+  public static final String LOCALSTACK_KINESIS_ENDPOINT = 
"http://localhost:4566";
+  public static final int NUM_SHARDS = 10;
+
+  public static final String SCHEMA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.schema";
+  public static final String DATA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.json";
+
+  private final Localstack localstackDocker = Localstack.INSTANCE;
+
+  private static KinesisClient kinesisClient = null;
+
+  private long totalRecordsPushedInStream = 0;
+
+  List<String> h2FieldNameAndTypes = new ArrayList<>();
+
+  @BeforeClass
+  public void setUp()
+      throws Exception {
+    TestUtils.ensureDirectoriesExistAndEmpty(_tempDir);
+
+    // Start the Pinot cluster
+    startZk();
+    startController();
+    startBroker();
+    startServer();
+
+    // Start Kinesis
+    startKinesis();
+
+    // Create and upload the schema and table config
+    addSchema(createKinesisSchema());
+    addTableConfig(createKinesisTableConfig());
+
+    createH2ConnectionAndTable();
+
+    // Push data into Kinesis
+    publishRecordsToKinesis();
+
+    // Wait for all documents loaded
+    waitForAllDocsLoadedKinesis(60_000L);
+  }
+
+  public Schema createKinesisSchema()
+      throws Exception {
+    URL resourceUrl = 
BaseClusterIntegrationTest.class.getClassLoader().getResource(SCHEMA_FILE_PATH);
+    Assert.assertNotNull(resourceUrl);
+    return Schema.fromFile(new File(resourceUrl.getFile()));
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs)
+      throws Exception {
+    waitForAllDocsLoadedKinesis(timeoutMs, true);
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs, boolean 
raiseError) {
+    TestUtils.waitForCondition(new Function<Void, Boolean>() {
+      @Nullable
+      @Override
+      public Boolean apply(@Nullable Void aVoid) {
+        try {
+          return getCurrentCountStarResult() >= totalRecordsPushedInStream;
+        } catch (Exception e) {
+          return null;

Review comment:
       Log this exception instead of silently swallowing it, so ingestion failures are visible while waiting.

##########
File path: 
pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/RealtimeKinesisIntegrationTest.java
##########
@@ -0,0 +1,448 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.integration.tests;
+
+import cloud.localstack.Localstack;
+import cloud.localstack.docker.annotation.LocalstackDockerAnnotationProcessor;
+import cloud.localstack.docker.annotation.LocalstackDockerConfiguration;
+import cloud.localstack.docker.annotation.LocalstackDockerProperties;
+import cloud.localstack.docker.command.Command;
+import cloud.localstack.docker.exception.LocalstackDockerException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.JsonNodeType;
+import com.google.common.base.Function;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import javax.activation.UnsupportedDataTypeException;
+import javax.annotation.Nullable;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.pinot.client.Request;
+import org.apache.pinot.client.ResultSet;
+import org.apache.pinot.common.utils.StringUtil;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConfig;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConsumerFactory;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.stream.StreamConfig;
+import org.apache.pinot.spi.stream.StreamConfigProperties;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.apache.pinot.util.TestUtils;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.http.SdkHttpConfigurationOption;
+import software.amazon.awssdk.http.apache.ApacheSdkHttpService;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.kinesis.KinesisClient;
+import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordResponse;
+import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
+import software.amazon.awssdk.utils.AttributeMap;
+
+import static org.awaitility.Awaitility.await;
+
+
+@LocalstackDockerProperties(services = {"kinesis", "dynamodb"})
+public class RealtimeKinesisIntegrationTest extends 
BaseClusterIntegrationTestSet {
+  private static final LocalstackDockerAnnotationProcessor PROCESSOR = new 
LocalstackDockerAnnotationProcessor();
+  private static final String STREAM_NAME = "kinesis-test";
+  private static final String STREAM_TYPE = "kinesis";
+  public static final int MAX_RECORDS_TO_FETCH = 2000;
+
+  public static final String REGION = "us-east-1";
+  public static final String LOCALSTACK_KINESIS_ENDPOINT = 
"http://localhost:4566";
+  public static final int NUM_SHARDS = 10;
+
+  public static final String SCHEMA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.schema";
+  public static final String DATA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.json";
+
+  private final Localstack localstackDocker = Localstack.INSTANCE;
+
+  private static KinesisClient kinesisClient = null;
+
+  private long totalRecordsPushedInStream = 0;
+
+  List<String> h2FieldNameAndTypes = new ArrayList<>();
+
+  @BeforeClass
+  public void setUp()
+      throws Exception {
+    TestUtils.ensureDirectoriesExistAndEmpty(_tempDir);
+
+    // Start the Pinot cluster
+    startZk();
+    startController();
+    startBroker();
+    startServer();
+
+    // Start Kinesis
+    startKinesis();
+
+    // Create and upload the schema and table config
+    addSchema(createKinesisSchema());
+    addTableConfig(createKinesisTableConfig());
+
+    createH2ConnectionAndTable();
+
+    // Push data into Kinesis
+    publishRecordsToKinesis();
+
+    // Wait for all documents loaded
+    waitForAllDocsLoadedKinesis(60_000L);
+  }
+
+  public Schema createKinesisSchema()
+      throws Exception {
+    URL resourceUrl = 
BaseClusterIntegrationTest.class.getClassLoader().getResource(SCHEMA_FILE_PATH);
+    Assert.assertNotNull(resourceUrl);
+    return Schema.fromFile(new File(resourceUrl.getFile()));
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs)
+      throws Exception {
+    waitForAllDocsLoadedKinesis(timeoutMs, true);
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs, boolean 
raiseError) {
+    TestUtils.waitForCondition(new Function<Void, Boolean>() {
+      @Nullable
+      @Override
+      public Boolean apply(@Nullable Void aVoid) {
+        try {
+          return getCurrentCountStarResult() >= totalRecordsPushedInStream;
+        } catch (Exception e) {
+          return null;
+        }
+      }
+    }, 1000L, timeoutMs, "Failed to load " + totalRecordsPushedInStream + " 
documents", raiseError);
+  }
+
+  public TableConfig createKinesisTableConfig() {
+    return new 
TableConfigBuilder(TableType.REALTIME).setTableName(getTableName()).setSchemaName(getTableName())
+        
.setTimeColumnName("DaysSinceEpoch").setFieldConfigList(getFieldConfigs()).setNumReplicas(getNumReplicas())
+        
.setSegmentVersion(getSegmentVersion()).setLoadMode(getLoadMode()).setTaskConfig(getTaskConfig())
+        
.setBrokerTenant(getBrokerTenant()).setServerTenant(getServerTenant()).setIngestionConfig(getIngestionConfig())
+        
.setLLC(true).setStreamConfigs(createKinesisStreamConfig()).setNullHandlingEnabled(getNullHandlingEnabled())
+        .build();
+  }
+
+  public Map<String, String> createKinesisStreamConfig() {
+    Map<String, String> streamConfigMap = new HashMap<>();
+    String streamType = "kinesis";
+    streamConfigMap.put(StreamConfigProperties.STREAM_TYPE, streamType);
+
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_TOPIC_NAME),
+            STREAM_NAME);
+
+    streamConfigMap.put(
+        StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_FETCH_TIMEOUT_MILLIS),
+        "30000");
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_CONSUMER_TYPES),
+            StreamConfig.ConsumerType.LOWLEVEL.toString());
+    streamConfigMap.put(StreamConfigProperties
+            .constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_CONSUMER_FACTORY_CLASS),
+        KinesisConsumerFactory.class.getName());
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_DECODER_CLASS),

Review comment:
       Let's make this use the new JsonDecoder from your other PR.

##########
File path: 
pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/RealtimeKinesisIntegrationTest.java
##########
@@ -0,0 +1,448 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.integration.tests;
+
+import cloud.localstack.Localstack;
+import cloud.localstack.docker.annotation.LocalstackDockerAnnotationProcessor;
+import cloud.localstack.docker.annotation.LocalstackDockerConfiguration;
+import cloud.localstack.docker.annotation.LocalstackDockerProperties;
+import cloud.localstack.docker.command.Command;
+import cloud.localstack.docker.exception.LocalstackDockerException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.JsonNodeType;
+import com.google.common.base.Function;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import javax.activation.UnsupportedDataTypeException;
+import javax.annotation.Nullable;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.pinot.client.Request;
+import org.apache.pinot.client.ResultSet;
+import org.apache.pinot.common.utils.StringUtil;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConfig;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConsumerFactory;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.stream.StreamConfig;
+import org.apache.pinot.spi.stream.StreamConfigProperties;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.apache.pinot.util.TestUtils;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.http.SdkHttpConfigurationOption;
+import software.amazon.awssdk.http.apache.ApacheSdkHttpService;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.kinesis.KinesisClient;
+import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordResponse;
+import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
+import software.amazon.awssdk.utils.AttributeMap;
+
+import static org.awaitility.Awaitility.await;
+
+
+@LocalstackDockerProperties(services = {"kinesis", "dynamodb"})
+public class RealtimeKinesisIntegrationTest extends 
BaseClusterIntegrationTestSet {
+  private static final LocalstackDockerAnnotationProcessor PROCESSOR = new 
LocalstackDockerAnnotationProcessor();
+  private static final String STREAM_NAME = "kinesis-test";
+  private static final String STREAM_TYPE = "kinesis";
+  public static final int MAX_RECORDS_TO_FETCH = 2000;
+
+  public static final String REGION = "us-east-1";
+  public static final String LOCALSTACK_KINESIS_ENDPOINT = 
"http://localhost:4566";
+  public static final int NUM_SHARDS = 10;
+
+  public static final String SCHEMA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.schema";
+  public static final String DATA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.json";
+
+  private final Localstack localstackDocker = Localstack.INSTANCE;
+
+  private static KinesisClient kinesisClient = null;
+
+  private long totalRecordsPushedInStream = 0;
+
+  List<String> h2FieldNameAndTypes = new ArrayList<>();
+
+  @BeforeClass
+  public void setUp()
+      throws Exception {
+    TestUtils.ensureDirectoriesExistAndEmpty(_tempDir);
+
+    // Start the Pinot cluster
+    startZk();
+    startController();
+    startBroker();
+    startServer();
+
+    // Start Kinesis
+    startKinesis();
+
+    // Create and upload the schema and table config
+    addSchema(createKinesisSchema());
+    addTableConfig(createKinesisTableConfig());
+
+    createH2ConnectionAndTable();
+
+    // Push data into Kinesis
+    publishRecordsToKinesis();
+
+    // Wait for all documents loaded
+    waitForAllDocsLoadedKinesis(60_000L);
+  }
+
+  public Schema createKinesisSchema()
+      throws Exception {
+    URL resourceUrl = 
BaseClusterIntegrationTest.class.getClassLoader().getResource(SCHEMA_FILE_PATH);
+    Assert.assertNotNull(resourceUrl);
+    return Schema.fromFile(new File(resourceUrl.getFile()));
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs)
+      throws Exception {
+    waitForAllDocsLoadedKinesis(timeoutMs, true);
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs, boolean 
raiseError) {
+    TestUtils.waitForCondition(new Function<Void, Boolean>() {
+      @Nullable
+      @Override
+      public Boolean apply(@Nullable Void aVoid) {
+        try {
+          return getCurrentCountStarResult() >= totalRecordsPushedInStream;
+        } catch (Exception e) {
+          return null;
+        }
+      }
+    }, 1000L, timeoutMs, "Failed to load " + totalRecordsPushedInStream + " 
documents", raiseError);
+  }
+
+  public TableConfig createKinesisTableConfig() {
+    return new 
TableConfigBuilder(TableType.REALTIME).setTableName(getTableName()).setSchemaName(getTableName())
+        
.setTimeColumnName("DaysSinceEpoch").setFieldConfigList(getFieldConfigs()).setNumReplicas(getNumReplicas())
+        
.setSegmentVersion(getSegmentVersion()).setLoadMode(getLoadMode()).setTaskConfig(getTaskConfig())
+        
.setBrokerTenant(getBrokerTenant()).setServerTenant(getServerTenant()).setIngestionConfig(getIngestionConfig())
+        
.setLLC(true).setStreamConfigs(createKinesisStreamConfig()).setNullHandlingEnabled(getNullHandlingEnabled())
+        .build();
+  }
+
+  public Map<String, String> createKinesisStreamConfig() {
+    Map<String, String> streamConfigMap = new HashMap<>();
+    String streamType = "kinesis";
+    streamConfigMap.put(StreamConfigProperties.STREAM_TYPE, streamType);
+
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_TOPIC_NAME),
+            STREAM_NAME);
+
+    streamConfigMap.put(
+        StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_FETCH_TIMEOUT_MILLIS),
+        "30000");
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_CONSUMER_TYPES),
+            StreamConfig.ConsumerType.LOWLEVEL.toString());
+    streamConfigMap.put(StreamConfigProperties
+            .constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_CONSUMER_FACTORY_CLASS),
+        KinesisConsumerFactory.class.getName());
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_DECODER_CLASS),
+            "org.apache.pinot.plugin.stream.kafka.KafkaJSONMessageDecoder");
+    streamConfigMap.put(KinesisConfig.REGION, REGION);
+    streamConfigMap.put(KinesisConfig.MAX_RECORDS_TO_FETCH, 
String.valueOf(MAX_RECORDS_TO_FETCH));
+    streamConfigMap.put(KinesisConfig.SHARD_ITERATOR_TYPE, 
ShardIteratorType.AT_SEQUENCE_NUMBER.toString());
+    streamConfigMap.put(KinesisConfig.ENDPOINT, LOCALSTACK_KINESIS_ENDPOINT);
+    streamConfigMap.put(KinesisConfig.ACCESS_KEY, 
getLocalAWSCredentials().resolveCredentials().accessKeyId());
+    streamConfigMap.put(KinesisConfig.SECRET_KEY, 
getLocalAWSCredentials().resolveCredentials().secretAccessKey());
+    streamConfigMap.put(StreamConfigProperties.SEGMENT_FLUSH_THRESHOLD_ROWS, 
Integer.toString(5000));

Review comment:
       As we were discussing offline, can we make this < 2000/NUM_SHARDS, so that a segment completion path is also tested here?

##########
File path: 
pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/RealtimeKinesisIntegrationTest.java
##########
@@ -0,0 +1,448 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.integration.tests;
+
+import cloud.localstack.Localstack;
+import cloud.localstack.docker.annotation.LocalstackDockerAnnotationProcessor;
+import cloud.localstack.docker.annotation.LocalstackDockerConfiguration;
+import cloud.localstack.docker.annotation.LocalstackDockerProperties;
+import cloud.localstack.docker.command.Command;
+import cloud.localstack.docker.exception.LocalstackDockerException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.JsonNodeType;
+import com.google.common.base.Function;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import javax.activation.UnsupportedDataTypeException;
+import javax.annotation.Nullable;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.pinot.client.Request;
+import org.apache.pinot.client.ResultSet;
+import org.apache.pinot.common.utils.StringUtil;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConfig;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConsumerFactory;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.stream.StreamConfig;
+import org.apache.pinot.spi.stream.StreamConfigProperties;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.apache.pinot.util.TestUtils;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.http.SdkHttpConfigurationOption;
+import software.amazon.awssdk.http.apache.ApacheSdkHttpService;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.kinesis.KinesisClient;
+import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordResponse;
+import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
+import software.amazon.awssdk.utils.AttributeMap;
+
+import static org.awaitility.Awaitility.await;
+
+
+@LocalstackDockerProperties(services = {"kinesis", "dynamodb"})
+public class RealtimeKinesisIntegrationTest extends 
BaseClusterIntegrationTestSet {
+  private static final LocalstackDockerAnnotationProcessor PROCESSOR = new 
LocalstackDockerAnnotationProcessor();
+  private static final String STREAM_NAME = "kinesis-test";
+  private static final String STREAM_TYPE = "kinesis";
+  public static final int MAX_RECORDS_TO_FETCH = 2000;
+
+  public static final String REGION = "us-east-1";
+  public static final String LOCALSTACK_KINESIS_ENDPOINT = 
"http://localhost:4566";
+  public static final int NUM_SHARDS = 10;
+
+  public static final String SCHEMA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.schema";
+  public static final String DATA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.json";
+
+  private final Localstack localstackDocker = Localstack.INSTANCE;
+
+  private static KinesisClient kinesisClient = null;
+
+  private long totalRecordsPushedInStream = 0;
+
+  List<String> h2FieldNameAndTypes = new ArrayList<>();
+
+  @BeforeClass
+  public void setUp()
+      throws Exception {
+    TestUtils.ensureDirectoriesExistAndEmpty(_tempDir);
+
+    // Start the Pinot cluster
+    startZk();
+    startController();
+    startBroker();
+    startServer();
+
+    // Start Kinesis
+    startKinesis();
+
+    // Create and upload the schema and table config
+    addSchema(createKinesisSchema());
+    addTableConfig(createKinesisTableConfig());
+
+    createH2ConnectionAndTable();
+
+    // Push data into Kinesis
+    publishRecordsToKinesis();
+
+    // Wait for all documents loaded
+    waitForAllDocsLoadedKinesis(60_000L);
+  }
+
+  public Schema createKinesisSchema()
+      throws Exception {
+    URL resourceUrl = 
BaseClusterIntegrationTest.class.getClassLoader().getResource(SCHEMA_FILE_PATH);
+    Assert.assertNotNull(resourceUrl);
+    return Schema.fromFile(new File(resourceUrl.getFile()));
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs)
+      throws Exception {
+    waitForAllDocsLoadedKinesis(timeoutMs, true);
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs, boolean 
raiseError) {
+    TestUtils.waitForCondition(new Function<Void, Boolean>() {
+      @Nullable
+      @Override
+      public Boolean apply(@Nullable Void aVoid) {
+        try {
+          return getCurrentCountStarResult() >= totalRecordsPushedInStream;
+        } catch (Exception e) {
+          return null;
+        }
+      }
+    }, 1000L, timeoutMs, "Failed to load " + totalRecordsPushedInStream + " 
documents", raiseError);
+  }
+
+  public TableConfig createKinesisTableConfig() {
+    return new 
TableConfigBuilder(TableType.REALTIME).setTableName(getTableName()).setSchemaName(getTableName())
+        
.setTimeColumnName("DaysSinceEpoch").setFieldConfigList(getFieldConfigs()).setNumReplicas(getNumReplicas())
+        
.setSegmentVersion(getSegmentVersion()).setLoadMode(getLoadMode()).setTaskConfig(getTaskConfig())
+        
.setBrokerTenant(getBrokerTenant()).setServerTenant(getServerTenant()).setIngestionConfig(getIngestionConfig())
+        
.setLLC(true).setStreamConfigs(createKinesisStreamConfig()).setNullHandlingEnabled(getNullHandlingEnabled())
+        .build();
+  }
+
+  public Map<String, String> createKinesisStreamConfig() {
+    Map<String, String> streamConfigMap = new HashMap<>();
+    String streamType = "kinesis";
+    streamConfigMap.put(StreamConfigProperties.STREAM_TYPE, streamType);
+
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_TOPIC_NAME),
+            STREAM_NAME);
+
+    streamConfigMap.put(
+        StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_FETCH_TIMEOUT_MILLIS),
+        "30000");
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_CONSUMER_TYPES),
+            StreamConfig.ConsumerType.LOWLEVEL.toString());
+    streamConfigMap.put(StreamConfigProperties
+            .constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_CONSUMER_FACTORY_CLASS),
+        KinesisConsumerFactory.class.getName());
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_DECODER_CLASS),
+            "org.apache.pinot.plugin.stream.kafka.KafkaJSONMessageDecoder");
+    streamConfigMap.put(KinesisConfig.REGION, REGION);
+    streamConfigMap.put(KinesisConfig.MAX_RECORDS_TO_FETCH, 
String.valueOf(MAX_RECORDS_TO_FETCH));
+    streamConfigMap.put(KinesisConfig.SHARD_ITERATOR_TYPE, 
ShardIteratorType.AT_SEQUENCE_NUMBER.toString());
+    streamConfigMap.put(KinesisConfig.ENDPOINT, LOCALSTACK_KINESIS_ENDPOINT);
+    streamConfigMap.put(KinesisConfig.ACCESS_KEY, 
getLocalAWSCredentials().resolveCredentials().accessKeyId());
+    streamConfigMap.put(KinesisConfig.SECRET_KEY, 
getLocalAWSCredentials().resolveCredentials().secretAccessKey());
+    streamConfigMap.put(StreamConfigProperties.SEGMENT_FLUSH_THRESHOLD_ROWS, 
Integer.toString(5000));
+    streamConfigMap.put(StreamConfigProperties
+        .constructStreamProperty(streamType, 
StreamConfigProperties.STREAM_CONSUMER_OFFSET_CRITERIA), "smallest");
+    return streamConfigMap;
+  }
+
+  public void startKinesis()
+      throws Exception {
+    try {
+      final LocalstackDockerConfiguration dockerConfig = 
PROCESSOR.process(this.getClass());
+      localstackDocker.startup(dockerConfig);
+    } catch (LocalstackDockerException e) {
+      StopAllLocalstackDockerCommand stopAllLocalstackDockerCommand = new 
StopAllLocalstackDockerCommand();
+      stopAllLocalstackDockerCommand.execute();
+
+      final LocalstackDockerConfiguration dockerConfig = 
PROCESSOR.process(this.getClass());

Review comment:
       Why are we trying to start up again in the catch block?

##########
File path: 
pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/RealtimeKinesisIntegrationTest.java
##########
@@ -0,0 +1,448 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.integration.tests;
+
+import cloud.localstack.Localstack;
+import cloud.localstack.docker.annotation.LocalstackDockerAnnotationProcessor;
+import cloud.localstack.docker.annotation.LocalstackDockerConfiguration;
+import cloud.localstack.docker.annotation.LocalstackDockerProperties;
+import cloud.localstack.docker.command.Command;
+import cloud.localstack.docker.exception.LocalstackDockerException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.JsonNodeType;
+import com.google.common.base.Function;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import javax.activation.UnsupportedDataTypeException;
+import javax.annotation.Nullable;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.pinot.client.Request;
+import org.apache.pinot.client.ResultSet;
+import org.apache.pinot.common.utils.StringUtil;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConfig;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConsumerFactory;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.stream.StreamConfig;
+import org.apache.pinot.spi.stream.StreamConfigProperties;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.apache.pinot.util.TestUtils;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.http.SdkHttpConfigurationOption;
+import software.amazon.awssdk.http.apache.ApacheSdkHttpService;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.kinesis.KinesisClient;
+import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordResponse;
+import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
+import software.amazon.awssdk.utils.AttributeMap;
+
+import static org.awaitility.Awaitility.await;
+
+
+// Starts localstack's "kinesis" and "dynamodb" services in Docker for this test.
+@LocalstackDockerProperties(services = {"kinesis", "dynamodb"})
+public class RealtimeKinesisIntegrationTest extends 
BaseClusterIntegrationTestSet {
+  // Derives the localstack docker configuration from the annotation above.
+  private static final LocalstackDockerAnnotationProcessor PROCESSOR = new 
LocalstackDockerAnnotationProcessor();
+  private static final String STREAM_NAME = "kinesis-test";
+  private static final String STREAM_TYPE = "kinesis";
+  public static final int MAX_RECORDS_TO_FETCH = 2000;
+
+  public static final String REGION = "us-east-1";
+  // 4566 is localstack's single "edge" port that fronts all emulated AWS services.
+  public static final String LOCALSTACK_KINESIS_ENDPOINT = 
"http://localhost:4566";;
+  public static final int NUM_SHARDS = 10;
+
+  // Reduced airlineStats schema/data bundled as test resources.
+  public static final String SCHEMA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.schema";
+  public static final String DATA_FILE_PATH = 
"kinesis/airlineStats_data_reduced.json";
+
+  private final Localstack localstackDocker = Localstack.INSTANCE;
+
+  private static KinesisClient kinesisClient = null;
+
+  // Count of records successfully published to Kinesis; used as the expected doc count.
+  private long totalRecordsPushedInStream = 0;
+
+  // Entries of the form "<columnName> <h2Type>" describing the H2 mirror table.
+  List<String> h2FieldNameAndTypes = new ArrayList<>();
+
+  @BeforeClass
+  public void setUp()
+      throws Exception {
+    // Order matters throughout this method: ZK before controller before
+    // broker/server; Kinesis before the table config that consumes from it;
+    // the H2 mirror table before records are published (publishing inserts
+    // into H2 in lockstep with Kinesis).
+    TestUtils.ensureDirectoriesExistAndEmpty(_tempDir);
+
+    // Start the Pinot cluster
+    startZk();
+    startController();
+    startBroker();
+    startServer();
+
+    // Start Kinesis
+    startKinesis();
+
+    // Create and upload the schema and table config
+    addSchema(createKinesisSchema());
+    addTableConfig(createKinesisTableConfig());
+
+    createH2ConnectionAndTable();
+
+    // Push data into Kinesis
+    publishRecordsToKinesis();
+
+    // Wait for all documents loaded
+    waitForAllDocsLoadedKinesis(60_000L);
+  }
+
+  public Schema createKinesisSchema()
+      throws Exception {
+    URL resourceUrl = 
BaseClusterIntegrationTest.class.getClassLoader().getResource(SCHEMA_FILE_PATH);
+    Assert.assertNotNull(resourceUrl);
+    return Schema.fromFile(new File(resourceUrl.getFile()));
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs)
+      throws Exception {
+    waitForAllDocsLoadedKinesis(timeoutMs, true);
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs, boolean 
raiseError) {
+    TestUtils.waitForCondition(new Function<Void, Boolean>() {
+      @Nullable
+      @Override
+      public Boolean apply(@Nullable Void aVoid) {
+        try {
+          return getCurrentCountStarResult() >= totalRecordsPushedInStream;
+        } catch (Exception e) {
+          return null;
+        }
+      }
+    }, 1000L, timeoutMs, "Failed to load " + totalRecordsPushedInStream + " 
documents", raiseError);
+  }
+
+  public TableConfig createKinesisTableConfig() {
+    return new 
TableConfigBuilder(TableType.REALTIME).setTableName(getTableName()).setSchemaName(getTableName())
+        
.setTimeColumnName("DaysSinceEpoch").setFieldConfigList(getFieldConfigs()).setNumReplicas(getNumReplicas())
+        
.setSegmentVersion(getSegmentVersion()).setLoadMode(getLoadMode()).setTaskConfig(getTaskConfig())
+        
.setBrokerTenant(getBrokerTenant()).setServerTenant(getServerTenant()).setIngestionConfig(getIngestionConfig())
+        
.setLLC(true).setStreamConfigs(createKinesisStreamConfig()).setNullHandlingEnabled(getNullHandlingEnabled())
+        .build();
+  }
+
+  public Map<String, String> createKinesisStreamConfig() {
+    Map<String, String> streamConfigMap = new HashMap<>();
+    String streamType = "kinesis";
+    streamConfigMap.put(StreamConfigProperties.STREAM_TYPE, streamType);
+
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_TOPIC_NAME),
+            STREAM_NAME);
+
+    streamConfigMap.put(
+        StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_FETCH_TIMEOUT_MILLIS),
+        "30000");
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_CONSUMER_TYPES),
+            StreamConfig.ConsumerType.LOWLEVEL.toString());
+    streamConfigMap.put(StreamConfigProperties
+            .constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_CONSUMER_FACTORY_CLASS),
+        KinesisConsumerFactory.class.getName());
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, 
StreamConfigProperties.STREAM_DECODER_CLASS),
+            "org.apache.pinot.plugin.stream.kafka.KafkaJSONMessageDecoder");
+    streamConfigMap.put(KinesisConfig.REGION, REGION);
+    streamConfigMap.put(KinesisConfig.MAX_RECORDS_TO_FETCH, 
String.valueOf(MAX_RECORDS_TO_FETCH));
+    streamConfigMap.put(KinesisConfig.SHARD_ITERATOR_TYPE, 
ShardIteratorType.AT_SEQUENCE_NUMBER.toString());
+    streamConfigMap.put(KinesisConfig.ENDPOINT, LOCALSTACK_KINESIS_ENDPOINT);
+    streamConfigMap.put(KinesisConfig.ACCESS_KEY, 
getLocalAWSCredentials().resolveCredentials().accessKeyId());
+    streamConfigMap.put(KinesisConfig.SECRET_KEY, 
getLocalAWSCredentials().resolveCredentials().secretAccessKey());
+    streamConfigMap.put(StreamConfigProperties.SEGMENT_FLUSH_THRESHOLD_ROWS, 
Integer.toString(5000));
+    streamConfigMap.put(StreamConfigProperties
+        .constructStreamProperty(streamType, 
StreamConfigProperties.STREAM_CONSUMER_OFFSET_CRITERIA), "smallest");
+    return streamConfigMap;
+  }
+
+  public void startKinesis()
+      throws Exception {
+    try {
+      final LocalstackDockerConfiguration dockerConfig = 
PROCESSOR.process(this.getClass());
+      localstackDocker.startup(dockerConfig);
+    } catch (LocalstackDockerException e) {
+      StopAllLocalstackDockerCommand stopAllLocalstackDockerCommand = new 
StopAllLocalstackDockerCommand();
+      stopAllLocalstackDockerCommand.execute();
+
+      final LocalstackDockerConfiguration dockerConfig = 
PROCESSOR.process(this.getClass());
+      localstackDocker.startup(dockerConfig);
+    }
+
+    kinesisClient = KinesisClient.builder().httpClient(new 
ApacheSdkHttpService().createHttpClientBuilder()
+        .buildWithDefaults(
+            
AttributeMap.builder().put(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, 
Boolean.TRUE).build()))
+        
.credentialsProvider(getLocalAWSCredentials()).region(Region.of(REGION))
+        .endpointOverride(new URI(LOCALSTACK_KINESIS_ENDPOINT)).build();
+
+    
kinesisClient.createStream(CreateStreamRequest.builder().streamName(STREAM_NAME).shardCount(NUM_SHARDS).build());
+    await().until(() -> 
kinesisClient.describeStream(DescribeStreamRequest.builder().streamName(STREAM_NAME).build())
+        .streamDescription().streamStatusAsString().equals("ACTIVE"));
+  }
+
+  public void stopKinesis() {
+    if (localstackDocker.isRunning()) {
+      localstackDocker.stop();
+    }
+  }
+
+  private void publishRecordsToKinesis() {
+    try {
+      StringBuilder params = new StringBuilder("?");
+      for (int i = 0; i < h2FieldNameAndTypes.size() - 1; i++) {
+        params.append(",?");
+      }
+      PreparedStatement h2Statement =
+          _h2Connection.prepareStatement("INSERT INTO " + getTableName() + " 
VALUES (" + params.toString() + ")");
+
+      InputStream inputStream =
+          
RealtimeKinesisIntegrationTest.class.getClassLoader().getResourceAsStream(DATA_FILE_PATH);
+
+      try (BufferedReader br = new BufferedReader(new 
InputStreamReader(inputStream, StandardCharsets.UTF_8))) {
+        String line;
+        while ((line = br.readLine()) != null) {
+          JsonNode data = JsonUtils.stringToJsonNode(line);
+
+          PutRecordRequest putRecordRequest =
+              
PutRecordRequest.builder().streamName(STREAM_NAME).data(SdkBytes.fromUtf8String(line))
+                  .partitionKey(data.get("Origin").textValue()).build();
+          PutRecordResponse putRecordResponse = 
kinesisClient.putRecord(putRecordRequest);
+          if (putRecordResponse.sdkHttpResponse().statusCode() == 200) {
+            if (StringUtils.isNotBlank(putRecordResponse.sequenceNumber()) && 
StringUtils
+                .isNotBlank(putRecordResponse.shardId())) {
+              totalRecordsPushedInStream++;
+
+              int fieldIndex = 1;
+              for (String fieldNameAndDatatype : h2FieldNameAndTypes) {
+                String[] fieldNameAndDatatypeList = 
fieldNameAndDatatype.split(" ");
+                String fieldName = fieldNameAndDatatypeList[0];
+                String h2DataType = fieldNameAndDatatypeList[1];
+                switch (h2DataType) {
+                  case "int": {
+                    h2Statement.setObject(fieldIndex++, 
data.get(fieldName).intValue());
+                    break;
+                  }
+                  case "varchar(128)": {
+                    h2Statement.setObject(fieldIndex++, 
data.get(fieldName).textValue());
+                    break;
+                  }
+                  default: {
+
+                  }
+                }
+              }
+              h2Statement.execute();
+            }
+          }
+        }
+      }
+
+      inputStream.close();
+    } catch (Exception e) {
+      throw new RuntimeException("Could not publish records to Kinesis 
Stream", e);
+    }
+  }
+
+  private static AwsCredentialsProvider getLocalAWSCredentials() {
+    return 
StaticCredentialsProvider.create(AwsBasicCredentials.create("access", 
"secret"));
+  }
+
+  @Test
+  public void testRecords()
+      throws Exception {
+    Assert.assertNotEquals(totalRecordsPushedInStream, 0);
+
+    ResultSet pinotResultSet = getPinotConnection()
+        .execute(new Request("sql", "SELECT * FROM " + getTableName() + " 
ORDER BY Origin LIMIT 10000"))
+        .getResultSet(0);
+
+    Assert.assertNotEquals(pinotResultSet.getRowCount(), 0);
+
+    Statement h2statement =
+        _h2Connection.createStatement(java.sql.ResultSet.TYPE_FORWARD_ONLY, 
java.sql.ResultSet.CONCUR_READ_ONLY);
+    h2statement.execute("SELECT * FROM " + getTableName() + " ORDER BY 
Origin");
+    java.sql.ResultSet h2ResultSet = h2statement.getResultSet();
+
+    Assert.assertFalse(h2ResultSet.isLast());
+
+    h2ResultSet.beforeFirst();
+    int row = 0;
+    Map<String, Integer> columnToIndex = new HashMap<>();
+    for (int i = 0; i < h2FieldNameAndTypes.size(); i++) {
+      columnToIndex.put(pinotResultSet.getColumnName(i), i);
+    }
+
+    while (h2ResultSet.next()) {
+
+      for (String fieldNameAndDatatype : h2FieldNameAndTypes) {
+        String[] fieldNameAndDatatypeList = fieldNameAndDatatype.split(" ");
+        String fieldName = fieldNameAndDatatypeList[0];
+        String h2DataType = fieldNameAndDatatypeList[1];
+        switch (h2DataType) {
+          case "int": {
+            int expectedValue = h2ResultSet.getInt(fieldName);
+            int actualValue = pinotResultSet.getInt(row, 
columnToIndex.get(fieldName));
+            Assert.assertEquals(expectedValue, actualValue);
+            break;
+          }
+          case "varchar(128)": {
+            String expectedValue = h2ResultSet.getString(fieldName);
+            String actualValue = pinotResultSet.getString(row, 
columnToIndex.get(fieldName));
+            Assert.assertEquals(expectedValue, actualValue);
+            break;
+          }
+          default: {

Review comment:
       Same as above: the empty default case silently skips the column — either remove it or throw/log an exception?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org
For additional commands, e-mail: commits-h...@pinot.apache.org

Reply via email to