KKcorps commented on a change in pull request #7016:
URL: https://github.com/apache/incubator-pinot/pull/7016#discussion_r645826213



##########
File path: pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/RealtimeKinesisIntegrationTest.java
##########
@@ -0,0 +1,448 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.integration.tests;
+
+import cloud.localstack.Localstack;
+import cloud.localstack.docker.annotation.LocalstackDockerAnnotationProcessor;
+import cloud.localstack.docker.annotation.LocalstackDockerConfiguration;
+import cloud.localstack.docker.annotation.LocalstackDockerProperties;
+import cloud.localstack.docker.command.Command;
+import cloud.localstack.docker.exception.LocalstackDockerException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.JsonNodeType;
+import com.google.common.base.Function;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.net.URI;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import javax.activation.UnsupportedDataTypeException;
+import javax.annotation.Nullable;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.pinot.client.Request;
+import org.apache.pinot.client.ResultSet;
+import org.apache.pinot.common.utils.StringUtil;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConfig;
+import org.apache.pinot.plugin.stream.kinesis.KinesisConsumerFactory;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.stream.StreamConfig;
+import org.apache.pinot.spi.stream.StreamConfigProperties;
+import org.apache.pinot.spi.utils.JsonUtils;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.apache.pinot.util.TestUtils;
+import org.testng.Assert;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.core.SdkBytes;
+import software.amazon.awssdk.http.SdkHttpConfigurationOption;
+import software.amazon.awssdk.http.apache.ApacheSdkHttpService;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.kinesis.KinesisClient;
+import software.amazon.awssdk.services.kinesis.model.CreateStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.DescribeStreamRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordRequest;
+import software.amazon.awssdk.services.kinesis.model.PutRecordResponse;
+import software.amazon.awssdk.services.kinesis.model.ShardIteratorType;
+import software.amazon.awssdk.utils.AttributeMap;
+
+import static org.awaitility.Awaitility.await;
+
+
+@LocalstackDockerProperties(services = {"kinesis", "dynamodb"})
+public class RealtimeKinesisIntegrationTest extends BaseClusterIntegrationTestSet {
+  private static final LocalstackDockerAnnotationProcessor PROCESSOR = new LocalstackDockerAnnotationProcessor();
+  private static final String STREAM_NAME = "kinesis-test";
+  private static final String STREAM_TYPE = "kinesis";
+  public static final int MAX_RECORDS_TO_FETCH = 2000;
+
+  public static final String REGION = "us-east-1";
+  public static final String LOCALSTACK_KINESIS_ENDPOINT = "http://localhost:4566";
+  public static final int NUM_SHARDS = 10;
+
+  public static final String SCHEMA_FILE_PATH = "kinesis/airlineStats_data_reduced.schema";
+  public static final String DATA_FILE_PATH = "kinesis/airlineStats_data_reduced.json";
+
+  private final Localstack localstackDocker = Localstack.INSTANCE;
+
+  private static KinesisClient kinesisClient = null;
+
+  private long totalRecordsPushedInStream = 0;
+
+  List<String> h2FieldNameAndTypes = new ArrayList<>();
+
+  @BeforeClass
+  public void setUp()
+      throws Exception {
+    TestUtils.ensureDirectoriesExistAndEmpty(_tempDir);
+
+    // Start the Pinot cluster
+    startZk();
+    startController();
+    startBroker();
+    startServer();
+
+    // Start Kinesis
+    startKinesis();
+
+    // Create and upload the schema and table config
+    addSchema(createKinesisSchema());
+    addTableConfig(createKinesisTableConfig());
+
+    createH2ConnectionAndTable();
+
+    // Push data into Kinesis
+    publishRecordsToKinesis();
+
+    // Wait for all documents loaded
+    waitForAllDocsLoadedKinesis(60_000L);
+  }
+
+  public Schema createKinesisSchema()
+      throws Exception {
+    URL resourceUrl = BaseClusterIntegrationTest.class.getClassLoader().getResource(SCHEMA_FILE_PATH);
+    Assert.assertNotNull(resourceUrl);
+    return Schema.fromFile(new File(resourceUrl.getFile()));
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs)
+      throws Exception {
+    waitForAllDocsLoadedKinesis(timeoutMs, true);
+  }
+
+  protected void waitForAllDocsLoadedKinesis(long timeoutMs, boolean raiseError) {
+    TestUtils.waitForCondition(new Function<Void, Boolean>() {
+      @Nullable
+      @Override
+      public Boolean apply(@Nullable Void aVoid) {
+        try {
+          return getCurrentCountStarResult() >= totalRecordsPushedInStream;
+        } catch (Exception e) {
+          return null;
+        }
+      }
+    }, 1000L, timeoutMs, "Failed to load " + totalRecordsPushedInStream + " documents", raiseError);
+  }
+
+  public TableConfig createKinesisTableConfig() {
+    return new TableConfigBuilder(TableType.REALTIME).setTableName(getTableName()).setSchemaName(getTableName())
+        .setTimeColumnName("DaysSinceEpoch").setFieldConfigList(getFieldConfigs()).setNumReplicas(getNumReplicas())
+        .setSegmentVersion(getSegmentVersion()).setLoadMode(getLoadMode()).setTaskConfig(getTaskConfig())
+        .setBrokerTenant(getBrokerTenant()).setServerTenant(getServerTenant()).setIngestionConfig(getIngestionConfig())
+        .setLLC(true).setStreamConfigs(createKinesisStreamConfig()).setNullHandlingEnabled(getNullHandlingEnabled())
+        .build();
+  }
+
+  public Map<String, String> createKinesisStreamConfig() {
+    Map<String, String> streamConfigMap = new HashMap<>();
+    String streamType = "kinesis";
+    streamConfigMap.put(StreamConfigProperties.STREAM_TYPE, streamType);
+
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, StreamConfigProperties.STREAM_TOPIC_NAME),
+            STREAM_NAME);
+
+    streamConfigMap.put(
+        StreamConfigProperties.constructStreamProperty(STREAM_TYPE, StreamConfigProperties.STREAM_FETCH_TIMEOUT_MILLIS),
+        "30000");
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, StreamConfigProperties.STREAM_CONSUMER_TYPES),
+            StreamConfig.ConsumerType.LOWLEVEL.toString());
+    streamConfigMap.put(StreamConfigProperties
+            .constructStreamProperty(STREAM_TYPE, StreamConfigProperties.STREAM_CONSUMER_FACTORY_CLASS),
+        KinesisConsumerFactory.class.getName());
+    streamConfigMap
+        .put(StreamConfigProperties.constructStreamProperty(STREAM_TYPE, StreamConfigProperties.STREAM_DECODER_CLASS),
+            "org.apache.pinot.plugin.stream.kafka.KafkaJSONMessageDecoder");
+    streamConfigMap.put(KinesisConfig.REGION, REGION);
+    streamConfigMap.put(KinesisConfig.MAX_RECORDS_TO_FETCH, String.valueOf(MAX_RECORDS_TO_FETCH));
+    streamConfigMap.put(KinesisConfig.SHARD_ITERATOR_TYPE, ShardIteratorType.AT_SEQUENCE_NUMBER.toString());
+    streamConfigMap.put(KinesisConfig.ENDPOINT, LOCALSTACK_KINESIS_ENDPOINT);
+    streamConfigMap.put(KinesisConfig.ACCESS_KEY, getLocalAWSCredentials().resolveCredentials().accessKeyId());
+    streamConfigMap.put(KinesisConfig.SECRET_KEY, getLocalAWSCredentials().resolveCredentials().secretAccessKey());
+    streamConfigMap.put(StreamConfigProperties.SEGMENT_FLUSH_THRESHOLD_ROWS, Integer.toString(5000));
+    streamConfigMap.put(StreamConfigProperties
+        .constructStreamProperty(streamType, StreamConfigProperties.STREAM_CONSUMER_OFFSET_CRITERIA), "smallest");
+    return streamConfigMap;
+  }
+
+  public void startKinesis()
+      throws Exception {
+    try {
+      final LocalstackDockerConfiguration dockerConfig = PROCESSOR.process(this.getClass());
+      localstackDocker.startup(dockerConfig);
+    } catch (LocalstackDockerException e) {
+      StopAllLocalstackDockerCommand stopAllLocalstackDockerCommand = new StopAllLocalstackDockerCommand();
+      stopAllLocalstackDockerCommand.execute();
+
+      final LocalstackDockerConfiguration dockerConfig = PROCESSOR.process(this.getClass());

Review comment:
       Sometimes the Localstack Docker container is not killed during teardown, so when the test is run again, Localstack startup fails because a container is already running and occupying the same ports.
   So here we kill all running Localstack containers and then retry once more. If that also fails, the test is terminated.
   
   We can't reuse the already running Localstack container since it may already contain data in the Kinesis stream.
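
   For reference, a minimal sketch of what StopAllLocalstackDockerCommand could look like, assuming Localstack's Command base class (imported above as cloud.localstack.docker.command.Command) exposes a dockerExe helper for running docker CLI commands; the image filter "localstack/localstack" is likewise an assumption:

      public static class StopAllLocalstackDockerCommand extends Command {

        public void execute() {
          // List the IDs of all containers created from the localstack image
          String runningDockerContainers =
              dockerExe.execute(Arrays.asList("ps", "-a", "-q", "-f", "ancestor=localstack/localstack"));
          if (StringUtils.isNotBlank(runningDockerContainers)
              && !runningDockerContainers.toLowerCase().startsWith("cannot")) {
            for (String containerId : runningDockerContainers.split("\n")) {
              // Stop each container so the retried startup can bind the same ports
              dockerExe.execute(Arrays.asList("stop", containerId));
            }
          }
        }
      }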

##########
File path: pinot-integration-tests/src/test/java/org/apache/pinot/integration/tests/RealtimeKinesisIntegrationTest.java
##########
@@ -0,0 +1,448 @@
+  public void startKinesis()
+      throws Exception {
+    try {
+      final LocalstackDockerConfiguration dockerConfig = PROCESSOR.process(this.getClass());
+      localstackDocker.startup(dockerConfig);
+    } catch (LocalstackDockerException e) {
+      StopAllLocalstackDockerCommand stopAllLocalstackDockerCommand = new StopAllLocalstackDockerCommand();
+      stopAllLocalstackDockerCommand.execute();
+
+      final LocalstackDockerConfiguration dockerConfig = PROCESSOR.process(this.getClass());
+      localstackDocker.startup(dockerConfig);
+    }
+
+    kinesisClient = KinesisClient.builder().httpClient(new ApacheSdkHttpService().createHttpClientBuilder()
+        .buildWithDefaults(
+            AttributeMap.builder().put(SdkHttpConfigurationOption.TRUST_ALL_CERTIFICATES, Boolean.TRUE).build()))
+        .credentialsProvider(getLocalAWSCredentials()).region(Region.of(REGION))
+        .endpointOverride(new URI(LOCALSTACK_KINESIS_ENDPOINT)).build();
+
+    kinesisClient.createStream(CreateStreamRequest.builder().streamName(STREAM_NAME).shardCount(NUM_SHARDS).build());
+    await().until(() -> kinesisClient.describeStream(DescribeStreamRequest.builder().streamName(STREAM_NAME).build())
+        .streamDescription().streamStatusAsString().equals("ACTIVE"));
+  }
+
+  public void stopKinesis() {
+    if (localstackDocker.isRunning()) {
+      localstackDocker.stop();
+    }
+  }
+
+  private void publishRecordsToKinesis() {
+    try {
+      StringBuilder params = new StringBuilder("?");
+      for (int i = 0; i < h2FieldNameAndTypes.size() - 1; i++) {
+        params.append(",?");
+      }
+      PreparedStatement h2Statement =
+          _h2Connection.prepareStatement("INSERT INTO " + getTableName() + " VALUES (" + params.toString() + ")");
+
+      InputStream inputStream =
+          RealtimeKinesisIntegrationTest.class.getClassLoader().getResourceAsStream(DATA_FILE_PATH);
+
+      try (BufferedReader br = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) {
+        String line;
+        while ((line = br.readLine()) != null) {
+          JsonNode data = JsonUtils.stringToJsonNode(line);
+
+          PutRecordRequest putRecordRequest =
+              PutRecordRequest.builder().streamName(STREAM_NAME).data(SdkBytes.fromUtf8String(line))
+                  .partitionKey(data.get("Origin").textValue()).build();
+          PutRecordResponse putRecordResponse = kinesisClient.putRecord(putRecordRequest);
+          if (putRecordResponse.sdkHttpResponse().statusCode() == 200) {
+            if (StringUtils.isNotBlank(putRecordResponse.sequenceNumber()) && StringUtils
+                .isNotBlank(putRecordResponse.shardId())) {
+              totalRecordsPushedInStream++;
+
+              int fieldIndex = 1;
+              for (String fieldNameAndDatatype : h2FieldNameAndTypes) {
+                String[] fieldNameAndDatatypeList = fieldNameAndDatatype.split(" ");
+                String fieldName = fieldNameAndDatatypeList[0];
+                String h2DataType = fieldNameAndDatatypeList[1];
+                switch (h2DataType) {
+                  case "int": {
+                    h2Statement.setObject(fieldIndex++, data.get(fieldName).intValue());
+                    break;
+                  }
+                  case "varchar(128)": {
+                    h2Statement.setObject(fieldIndex++, data.get(fieldName).textValue());
+                    break;
+                  }
+                  default: {

Review comment:
       done
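
   Presumably the default branch now fails fast on an unrecognized H2 column type; given the javax.activation.UnsupportedDataTypeException import at the top of the file, the change plausibly looks like this (a sketch, not the verbatim fix):

                  default: {
                    // Assumption: unknown H2 data types abort the test instead of being silently skipped
                    throw new UnsupportedDataTypeException("Unsupported data type: " + h2DataType);
                  }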

##########
File path: pinot-plugins/pinot-stream-ingestion/pinot-kinesis/src/main/java/org/apache/pinot/plugin/stream/kinesis/KinesisConnectionHandler.java
##########
@@ -73,17 +79,26 @@ public KinesisConnectionHandler(KinesisConfig kinesisConfig, KinesisClient kines
    */
   public void createConnection() {
     if (_kinesisClient == null) {
+      KinesisClientBuilder kinesisClientBuilder;
       if (StringUtils.isNotBlank(_accessKey) && StringUtils.isNotBlank(_secretKey)) {
         AwsBasicCredentials awsBasicCredentials = AwsBasicCredentials.create(_accessKey, _secretKey);
-        _kinesisClient = KinesisClient.builder().region(Region.of(_region))
+        kinesisClientBuilder = KinesisClient.builder().region(Region.of(_region))
             .credentialsProvider(StaticCredentialsProvider.create(awsBasicCredentials))
-            .httpClientBuilder(new ApacheSdkHttpService().createHttpClientBuilder())
-            .build();
+            .httpClientBuilder(new ApacheSdkHttpService().createHttpClientBuilder());
       } else {
-        _kinesisClient =
-            KinesisClient.builder().region(Region.of(_region)).credentialsProvider(DefaultCredentialsProvider.create())
-                .build();
+        kinesisClientBuilder =
+            KinesisClient.builder().region(Region.of(_region)).credentialsProvider(DefaultCredentialsProvider.create());
       }
       }
+
+      if(StringUtils.isNotBlank(_endpoint)){

Review comment:
       fixed.
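
   For context, the fix presumably applies the optional endpoint override to the shared kinesisClientBuilder and builds the client once at the end; a sketch under that assumption (the URISyntaxException handling is also assumed):

      if (StringUtils.isNotBlank(_endpoint)) {
        try {
          kinesisClientBuilder = kinesisClientBuilder.endpointOverride(new URI(_endpoint));
        } catch (URISyntaxException e) {
          throw new IllegalArgumentException("Invalid URI syntax for Kinesis endpoint: " + _endpoint, e);
        }
      }
      _kinesisClient = kinesisClientBuilder.build();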




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org



---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org
For additional commands, e-mail: commits-h...@pinot.apache.org
