FranMorilloAWS commented on issue #9869:
URL: https://github.com/apache/pinot/issues/9869#issuecomment-1674672985

   Yes, I modified controller.vip.host to point to the load balancer of the controllers and that worked; the setting I changed is sketched below.
   
   However, I am now facing an issue with the current table configuration: once the segments transition from CONSUMING to GOOD, the servers do not create new consuming segments to continue reading from the Kinesis data stream.
   
   The flush thresholds for row count and segment size also appear to be ignored.
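   
   For reference, this is roughly the controller configuration change I made (the host name below is a placeholder for the load balancer DNS name, not my actual value):
   
     # advertise the controller load balancer instead of the individual controller host
     controller.vip.host=<controller-load-balancer-dns-name>
     controller.vip.port=9000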
   
   I'll add my table configuration:
   
   {
     "REALTIME": {
       "tableName": "kinesisTable_REALTIME",
       "tableType": "REALTIME",
       "segmentsConfig": {
         "replication": "2",
         "retentionTimeUnit": "DAYS",
         "retentionTimeValue": "7",
         "replicasPerPartition": "2",
         "minimizeDataMovement": false,
         "timeColumnName": "creationTimestamp",
         "segmentPushType": "APPEND",
         "completionConfig": {
           "completionMode": "DOWNLOAD"
         }
       },
       "tenants": {
         "broker": "DefaultTenant",
         "server": "DefaultTenant"
       },
       "tableIndexConfig": {
         "invertedIndexColumns": [
           "product"
         ],
         "noDictionaryColumns": [
           "price"
         ],
         "rangeIndexVersion": 2,
         "autoGeneratedInvertedIndex": false,
         "createInvertedIndexDuringSegmentGeneration": false,
         "sortedColumn": [
           "creationTimestamp"
         ],
         "loadMode": "MMAP",
         "streamConfigs": {
           "streamType": "kinesis",
           "stream.kinesis.topic.name": "pinot-stream",
           "region": "eu-west-1",
           "shardIteratorType": "LATEST",
           "stream.kinesis.consumer.type": "lowlevel",
           "stream.kinesis.fetch.timeout.millis": "30000",
           "stream.kinesis.decoder.class.name": 
"org.apache.pinot.plugin.stream.kafka.KafkaJSONMessageDecoder",
           "stream.kinesis.consumer.factory.class.name": 
"org.apache.pinot.plugin.stream.kinesis.KinesisConsumerFactory",
           "realtime.segment.flush.threshold.rows": "1400000",
           "realtime.segment.flush.threshold.time": "1h",
           "realtime.segment.flush.threshold.size": "200M"
         },
         "varLengthDictionaryColumns": [
           "campaign",
           "color",
           "department"
         ],
         "enableDefaultStarTree": false,
         "enableDynamicStarTreeCreation": false,
         "aggregateMetrics": false,
         "nullHandlingEnabled": false,
         "optimizeDictionary": false,
         "optimizeDictionaryForMetrics": false,
         "noDictionarySizeRatioThreshold": 0
       },
       "metadata": {
         "customConfigs": {}
       },
       "isDimTable": false
     }
   }
   
   
   The table has four segments that are in GOOD state and stored in S3. It just stops creating new consuming segments and stops reading from Kinesis.
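   
   One thing I am unsure about with the thresholds: my understanding from the docs (not verified against this Pinot version) is that realtime.segment.flush.threshold.rows takes precedence whenever it is non-zero, and the size-based threshold is only consulted when rows is set to 0, with the newer name for the size property being realtime.segment.flush.threshold.segment.size. Under that assumption, a minimal sketch of purely size-driven flush settings would be:
   
       "realtime.segment.flush.threshold.rows": "0",
       "realtime.segment.flush.threshold.time": "1h",
       "realtime.segment.flush.threshold.segment.size": "200M"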
   

