SabrinaZhaozyf commented on code in PR #9236:
URL: https://github.com/apache/pinot/pull/9236#discussion_r953186422


##########
pinot-core/src/test/java/org/apache/pinot/queries/BaseQueriesTest.java:
##########
@@ -197,4 +197,70 @@ protected BrokerResponseNative 
getBrokerResponseForOptimizedQuery(String query,
     OPTIMIZER.optimize(pinotQuery, config, schema);
     return getBrokerResponse(pinotQuery, PLAN_MAKER);
   }
+

Review Comment:
   Done



##########
pinot-core/src/test/java/org/apache/pinot/queries/CovarianceQueriesTest.java:
##########
@@ -0,0 +1,444 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.pinot.queries;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.math3.stat.correlation.Covariance;
+import org.apache.commons.math3.util.Precision;
+import org.apache.pinot.common.response.broker.BrokerResponseNative;
+import org.apache.pinot.common.response.broker.ResultTable;
+import org.apache.pinot.core.common.Operator;
+import org.apache.pinot.core.operator.blocks.IntermediateResultsBlock;
+import org.apache.pinot.core.operator.query.AggregationGroupByOrderByOperator;
+import org.apache.pinot.core.operator.query.AggregationOperator;
+import 
org.apache.pinot.core.query.aggregation.groupby.AggregationGroupByResult;
+import org.apache.pinot.segment.local.customobject.CovarianceTuple;
+import 
org.apache.pinot.segment.local.indexsegment.immutable.ImmutableSegmentLoader;
+import 
org.apache.pinot.segment.local.segment.creator.impl.SegmentIndexCreationDriverImpl;
+import org.apache.pinot.segment.local.segment.readers.GenericRowRecordReader;
+import org.apache.pinot.segment.spi.ImmutableSegment;
+import org.apache.pinot.segment.spi.IndexSegment;
+import org.apache.pinot.segment.spi.creator.SegmentGeneratorConfig;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.FieldSpec;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.data.readers.GenericRow;
+import org.apache.pinot.spi.utils.ReadMode;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertNotNull;
+import static org.testng.Assert.assertTrue;
+
+
+/**
+ * Queries test for covariance queries.
+ */
+public class CovarianceQueriesTest extends BaseQueriesTest {
+  private static final File INDEX_DIR = new File(FileUtils.getTempDirectory(), 
"CovarianceQueriesTest");
+  private static final String RAW_TABLE_NAME = "testTable";
+  private static final String SEGMENT_NAME = "testSegment";
+
+  // test segments 1-4 evenly divide testSegment into 4 distinct segments
+  private static final String SEGMENT_NAME_1 = "testSegment1";
+  private static final String SEGMENT_NAME_2 = "testSegment2";
+  private static final String SEGMENT_NAME_3 = "testSegment3";
+  private static final String SEGMENT_NAME_4 = "testSegment4";
+
+  private static final int NUM_RECORDS = 2000;
+  private static final int NUM_GROUPS = 10;
+  private static final int MAX_VALUE = 500;
+  private static final double RELATIVE_EPSILON = 0.0001;
+  private static final double DELTA = 0.0001;
+
+  private static final String INT_COLUMN_X = "intColumnX";
+  private static final String INT_COLUMN_Y = "intColumnY";
+  private static final String DOUBLE_COLUMN_X = "doubleColumnX";
+  private static final String DOUBLE_COLUMN_Y = "doubleColumnY";
+  private static final String LONG_COLUMN = "longColumn";
+  private static final String FLOAT_COLUMN = "floatColumn";
+  private static final String GROUP_BY_COLUMN = "groupByColumn";
+
+  private static final Schema SCHEMA =
+      new Schema.SchemaBuilder().addSingleValueDimension(INT_COLUMN_X, 
FieldSpec.DataType.INT)
+          .addSingleValueDimension(INT_COLUMN_Y, FieldSpec.DataType.INT)
+          .addSingleValueDimension(DOUBLE_COLUMN_X, FieldSpec.DataType.DOUBLE)
+          .addSingleValueDimension(DOUBLE_COLUMN_Y, FieldSpec.DataType.DOUBLE)
+          .addSingleValueDimension(LONG_COLUMN, FieldSpec.DataType.LONG)
+          .addSingleValueDimension(FLOAT_COLUMN, FieldSpec.DataType.FLOAT)
+          .addSingleValueDimension(GROUP_BY_COLUMN, 
FieldSpec.DataType.DOUBLE).build();
+  private static final TableConfig TABLE_CONFIG =
+      new 
TableConfigBuilder(TableType.OFFLINE).setTableName(RAW_TABLE_NAME).build();
+
+  private IndexSegment _indexSegment;
+  private List<IndexSegment> _indexSegments;
+  private List<List<IndexSegment>> _instances;
+  private int _sumIntX = 0;
+  private int _sumIntY = 0;
+  private int _sumIntXY = 0;
+
+  private double _sumDoubleX = 0;
+  private double _sumDoubleY = 0;
+  private double _sumDoubleXY = 0;
+
+  private long _sumLong = 0L;
+  private double _sumFloat = 0;
+
+  private double _sumIntDouble = 0;
+  private long _sumIntLong = 0L;
+  private double _sumIntFloat = 0;
+  private double _sumDoubleLong = 0;
+  private double _sumDoubleFloat = 0;
+  private double _sumLongFloat = 0;
+
+  private double _expectedCovIntXY;
+  private double _expectedCovDoubleXY;
+  private double _expectedCovIntDouble;
+  private double _expectedCovIntLong;
+  private double _expectedCovIntFloat;
+  private double _expectedCovDoubleLong;
+  private double _expectedCovDoubleFloat;
+  private double _expectedCovLongFloat;
+
+  private double _expectedCovWithFilter;
+
+  private CovarianceTuple[] _expectedGroupByResultVer1 = new 
CovarianceTuple[NUM_GROUPS];
+  private CovarianceTuple[] _expectedGroupByResultVer2 = new 
CovarianceTuple[NUM_GROUPS];
+  private double[] _expectedFinalResultVer1 = new double[NUM_GROUPS];
+  private double[] _expectedFinalResultVer2 = new double[NUM_GROUPS];
+
+  @Override
+  protected String getFilter() {
+    // filter out half of the rows based on group id
+    return " WHERE groupByColumn < " + (NUM_GROUPS / 2);
+  }
+
+  @Override
+  protected IndexSegment getIndexSegment() {
+    return _indexSegment;
+  }
+
+  @Override
+  protected List<IndexSegment> getIndexSegments() {
+    return _indexSegments;
+  }
+
+  @BeforeClass
+  public void setUp()
+      throws Exception {
+    FileUtils.deleteDirectory(INDEX_DIR);
+
+    List<GenericRow> records = new ArrayList<>(NUM_RECORDS);
+
+    Random rand = new Random();
+    int[] intColX = rand.ints(NUM_RECORDS, -MAX_VALUE, MAX_VALUE).toArray();
+    int[] intColY = rand.ints(NUM_RECORDS, -MAX_VALUE, MAX_VALUE).toArray();
+    double[] doubleColX = rand.doubles(NUM_RECORDS, -MAX_VALUE, 
MAX_VALUE).toArray();
+    double[] doubleColY = rand.doubles(NUM_RECORDS, -MAX_VALUE, 
MAX_VALUE).toArray();
+    long[] longCol = rand.longs(NUM_RECORDS, -MAX_VALUE, MAX_VALUE).toArray();
+    double[] floatCol = new double[NUM_RECORDS];
+    double[] groupByCol = new double[NUM_RECORDS];
+
+    int groupSize = NUM_RECORDS / NUM_GROUPS;
+    double sumX = 0;
+    double sumY = 0;
+    double sumGroupBy = 0;
+    double sumXY = 0;
+    double sumXGroupBy = 0;
+    int groupByVal = 0;
+
+    for (int i = 0; i < NUM_RECORDS; i++) {
+      GenericRow record = new GenericRow();
+      int intX = intColX[i];
+      int intY = intColY[i];
+      double doubleX = doubleColX[i];
+      double doubleY = doubleColY[i];
+      long longVal = longCol[i];
+      float floatVal = -MAX_VALUE + rand.nextFloat() * 2 * MAX_VALUE;
+
+      // set up inner segment group by results
+      groupByVal = (int) Math.floor(i / groupSize);
+      if (i % groupSize == 0 && groupByVal > 0) {
+        _expectedGroupByResultVer1[groupByVal - 1] = new CovarianceTuple(sumX, 
sumGroupBy, sumXGroupBy, groupSize);
+        _expectedGroupByResultVer2[groupByVal - 1] = new CovarianceTuple(sumX, 
sumY, sumXY, groupSize);
+        sumX = 0;
+        sumY = 0;
+        sumGroupBy = 0;
+        sumXY = 0;
+        sumXGroupBy = 0;
+      }
+
+      sumX += doubleX;
+      sumY += doubleY;
+      sumGroupBy += groupByVal;
+      sumXY += doubleX * doubleY;
+      sumXGroupBy += doubleX * groupByVal;
+
+      floatCol[i] = floatVal;
+      groupByCol[i] = groupByVal;
+
+      // calculate inner segment results
+      _sumIntX += intX;
+      _sumIntY += intY;
+      _sumDoubleX += doubleX;
+      _sumDoubleY += doubleY;
+      _sumLong += longVal;
+      _sumFloat += floatVal;
+      _sumIntXY += intX * intY;
+      _sumDoubleXY += doubleX * doubleY;
+      _sumIntDouble += intX * doubleX;
+      _sumIntLong += intX * longVal;
+      _sumIntFloat += intX * floatCol[i];
+      _sumDoubleLong += doubleX * longVal;
+      _sumDoubleFloat += doubleX * floatCol[i];
+      _sumLongFloat += longVal * floatCol[i];
+
+      record.putValue(INT_COLUMN_X, intX);
+      record.putValue(INT_COLUMN_Y, intY);
+      record.putValue(DOUBLE_COLUMN_X, doubleX);
+      record.putValue(DOUBLE_COLUMN_Y, doubleY);
+      record.putValue(LONG_COLUMN, longVal);
+      record.putValue(FLOAT_COLUMN, floatVal);
+      record.putValue(GROUP_BY_COLUMN, groupByVal);
+      records.add(record);
+    }
+    _expectedGroupByResultVer1[groupByVal] = new CovarianceTuple(sumX, 
sumGroupBy, sumXGroupBy, groupSize);
+    _expectedGroupByResultVer2[groupByVal] = new CovarianceTuple(sumX, sumY, 
sumXY, groupSize);
+
+    // calculate inter segment result
+    Covariance cov = new Covariance();
+    double[] newIntColX = Arrays.stream(intColX).asDoubleStream().toArray();
+    double[] newIntColY = Arrays.stream(intColY).asDoubleStream().toArray();
+    double[] newLongCol = Arrays.stream(longCol).asDoubleStream().toArray();
+    _expectedCovIntXY = cov.covariance(newIntColX, newIntColY, false);
+    _expectedCovDoubleXY = cov.covariance(doubleColX, doubleColY, false);
+    _expectedCovIntDouble = cov.covariance(newIntColX, doubleColX, false);
+    _expectedCovIntLong = cov.covariance(newIntColX, newLongCol, false);
+    _expectedCovIntFloat = cov.covariance(newIntColX, floatCol, false);
+    _expectedCovDoubleLong = cov.covariance(doubleColX, newLongCol, false);
+    _expectedCovDoubleFloat = cov.covariance(doubleColX, floatCol, false);
+    _expectedCovLongFloat = cov.covariance(newLongCol, floatCol, false);
+
+    double[] filteredX = Arrays.copyOfRange(doubleColX, 0, NUM_RECORDS / 2);
+    double[] filteredY = Arrays.copyOfRange(doubleColY, 0, NUM_RECORDS / 2);
+    _expectedCovWithFilter = cov.covariance(filteredX, filteredY, false);
+
+    // calculate inter segment group by results
+    for (int i = 0; i < NUM_GROUPS; i++) {
+      double[] colX = Arrays.copyOfRange(doubleColX, i * groupSize, (i + 1) * 
groupSize);
+      double[] colGroupBy = Arrays.copyOfRange(groupByCol, i * groupSize, (i + 
1) * groupSize);
+      double[] colY = Arrays.copyOfRange(doubleColY, i * groupSize, (i + 1) * 
groupSize);
+      _expectedFinalResultVer1[i] = cov.covariance(colX, colGroupBy, false);
+      _expectedFinalResultVer2[i] = cov.covariance(colX, colY, false);
+    }
+
+    // generate testSegment
+    ImmutableSegment immutableSegment = setUpSingleSegment(records, 
SEGMENT_NAME);
+    _indexSegment = immutableSegment;
+    _indexSegments = Arrays.asList(immutableSegment, immutableSegment);
+
+    // divide testSegment into 4 distinct segments for distinct inter segment 
tests
+    // by doing so, we can avoid calculating global covariance again
+    _instances = new ArrayList<>();
+    int segmentSize = NUM_RECORDS / 4;
+    ImmutableSegment immutableSegment1 = setUpSingleSegment(records.subList(0, 
segmentSize), SEGMENT_NAME_1);
+    ImmutableSegment immutableSegment2 =
+        setUpSingleSegment(records.subList(segmentSize, segmentSize * 2), 
SEGMENT_NAME_2);
+    ImmutableSegment immutableSegment3 =
+        setUpSingleSegment(records.subList(segmentSize * 2, segmentSize * 3), 
SEGMENT_NAME_3);
+    ImmutableSegment immutableSegment4 =
+        setUpSingleSegment(records.subList(segmentSize * 3, NUM_RECORDS), 
SEGMENT_NAME_4);
+    // generate 2 instances each with 2 distinct segments
+    _instances.add(Arrays.asList(immutableSegment1, immutableSegment2));
+    _instances.add(Arrays.asList(immutableSegment3, immutableSegment4));
+  }
+
+  private ImmutableSegment setUpSingleSegment(List<GenericRow> recordSet, 
String segmentName)
+      throws Exception {
+    SegmentGeneratorConfig segmentGeneratorConfig = new 
SegmentGeneratorConfig(TABLE_CONFIG, SCHEMA);
+    segmentGeneratorConfig.setTableName(RAW_TABLE_NAME);
+    segmentGeneratorConfig.setSegmentName(segmentName);
+    segmentGeneratorConfig.setOutDir(INDEX_DIR.getPath());
+
+    SegmentIndexCreationDriverImpl driver = new 
SegmentIndexCreationDriverImpl();
+    driver.init(segmentGeneratorConfig, new GenericRowRecordReader(recordSet));
+    driver.build();
+
+    ImmutableSegment immutableSegment = ImmutableSegmentLoader.load(new 
File(INDEX_DIR, segmentName), ReadMode.mmap);
+    return immutableSegment;
+  }
+
+  @Test
+  public void testAggregationOnly() {
+    // Inner Segment
+    String query = "SELECT COV_POP(intColumnX, intColumnY), 
COV_POP(doubleColumnX, doubleColumnY), COV_POP(intColumnX, "
+        + "doubleColumnX), " + "COV_POP(intColumnX, longColumn), 
COV_POP(intColumnX, floatColumn), "
+        + "COV_POP(doubleColumnX, longColumn), COV_POP(doubleColumnX, 
floatColumn), COV_POP(longColumn, "
+        + "floatColumn)  FROM testTable";

Review Comment:
   Done



##########
pinot-segment-local/src/main/java/org/apache/pinot/segment/local/customobject/CovarianceTuple.java:
##########
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.pinot.segment.local.customobject;
+
+import java.nio.ByteBuffer;
+import javax.annotation.Nonnull;
+
+
+public class CovarianceTuple implements Comparable<CovarianceTuple> {

Review Comment:
   Done



##########
pinot-core/src/main/java/org/apache/pinot/core/query/aggregation/function/CovarianceAggregationFunction.java:
##########
@@ -199,6 +201,9 @@ public Double extractFinalResult(CovarianceTuple 
covarianceTuple) {
       double sumX = covarianceTuple.getSumX();
       double sumY = covarianceTuple.getSumY();
       double sumXY = covarianceTuple.getSumXY();
+      if (_isSample) {
+        return (sumXY / (count - 1)) - (sumX / (count - 1)) * (sumY / (count - 
1));

Review Comment:
   Done



##########
pinot-core/src/test/java/org/apache/pinot/queries/CovarianceQueriesTest.java:
##########
@@ -0,0 +1,444 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.pinot.queries;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.math3.stat.correlation.Covariance;
+import org.apache.commons.math3.util.Precision;
+import org.apache.pinot.common.response.broker.BrokerResponseNative;
+import org.apache.pinot.common.response.broker.ResultTable;
+import org.apache.pinot.core.common.Operator;
+import org.apache.pinot.core.operator.blocks.IntermediateResultsBlock;
+import org.apache.pinot.core.operator.query.AggregationGroupByOrderByOperator;
+import org.apache.pinot.core.operator.query.AggregationOperator;
+import 
org.apache.pinot.core.query.aggregation.groupby.AggregationGroupByResult;
+import org.apache.pinot.segment.local.customobject.CovarianceTuple;
+import 
org.apache.pinot.segment.local.indexsegment.immutable.ImmutableSegmentLoader;
+import 
org.apache.pinot.segment.local.segment.creator.impl.SegmentIndexCreationDriverImpl;
+import org.apache.pinot.segment.local.segment.readers.GenericRowRecordReader;
+import org.apache.pinot.segment.spi.ImmutableSegment;
+import org.apache.pinot.segment.spi.IndexSegment;
+import org.apache.pinot.segment.spi.creator.SegmentGeneratorConfig;
+import org.apache.pinot.spi.config.table.TableConfig;
+import org.apache.pinot.spi.config.table.TableType;
+import org.apache.pinot.spi.data.FieldSpec;
+import org.apache.pinot.spi.data.Schema;
+import org.apache.pinot.spi.data.readers.GenericRow;
+import org.apache.pinot.spi.utils.ReadMode;
+import org.apache.pinot.spi.utils.builder.TableConfigBuilder;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertNotNull;
+import static org.testng.Assert.assertTrue;
+
+
+/**
+ * Queries test for covariance queries.
+ */
+public class CovarianceQueriesTest extends BaseQueriesTest {
+  private static final File INDEX_DIR = new File(FileUtils.getTempDirectory(), 
"CovarianceQueriesTest");
+  private static final String RAW_TABLE_NAME = "testTable";
+  private static final String SEGMENT_NAME = "testSegment";
+
+  // test segments 1-4 evenly divide testSegment into 4 distinct segments
+  private static final String SEGMENT_NAME_1 = "testSegment1";
+  private static final String SEGMENT_NAME_2 = "testSegment2";
+  private static final String SEGMENT_NAME_3 = "testSegment3";
+  private static final String SEGMENT_NAME_4 = "testSegment4";
+
+  private static final int NUM_RECORDS = 2000;
+  private static final int NUM_GROUPS = 10;
+  private static final int MAX_VALUE = 500;
+  private static final double RELATIVE_EPSILON = 0.0001;
+  private static final double DELTA = 0.0001;
+
+  private static final String INT_COLUMN_X = "intColumnX";
+  private static final String INT_COLUMN_Y = "intColumnY";
+  private static final String DOUBLE_COLUMN_X = "doubleColumnX";
+  private static final String DOUBLE_COLUMN_Y = "doubleColumnY";
+  private static final String LONG_COLUMN = "longColumn";
+  private static final String FLOAT_COLUMN = "floatColumn";
+  private static final String GROUP_BY_COLUMN = "groupByColumn";
+
+  private static final Schema SCHEMA =
+      new Schema.SchemaBuilder().addSingleValueDimension(INT_COLUMN_X, 
FieldSpec.DataType.INT)
+          .addSingleValueDimension(INT_COLUMN_Y, FieldSpec.DataType.INT)
+          .addSingleValueDimension(DOUBLE_COLUMN_X, FieldSpec.DataType.DOUBLE)
+          .addSingleValueDimension(DOUBLE_COLUMN_Y, FieldSpec.DataType.DOUBLE)
+          .addSingleValueDimension(LONG_COLUMN, FieldSpec.DataType.LONG)
+          .addSingleValueDimension(FLOAT_COLUMN, FieldSpec.DataType.FLOAT)
+          .addSingleValueDimension(GROUP_BY_COLUMN, 
FieldSpec.DataType.DOUBLE).build();
+  private static final TableConfig TABLE_CONFIG =
+      new 
TableConfigBuilder(TableType.OFFLINE).setTableName(RAW_TABLE_NAME).build();
+
+  private IndexSegment _indexSegment;
+  private List<IndexSegment> _indexSegments;
+  private List<List<IndexSegment>> _instances;
+  private int _sumIntX = 0;
+  private int _sumIntY = 0;
+  private int _sumIntXY = 0;
+
+  private double _sumDoubleX = 0;
+  private double _sumDoubleY = 0;
+  private double _sumDoubleXY = 0;
+
+  private long _sumLong = 0L;
+  private double _sumFloat = 0;
+
+  private double _sumIntDouble = 0;
+  private long _sumIntLong = 0L;
+  private double _sumIntFloat = 0;
+  private double _sumDoubleLong = 0;
+  private double _sumDoubleFloat = 0;
+  private double _sumLongFloat = 0;
+
+  private double _expectedCovIntXY;
+  private double _expectedCovDoubleXY;
+  private double _expectedCovIntDouble;
+  private double _expectedCovIntLong;
+  private double _expectedCovIntFloat;
+  private double _expectedCovDoubleLong;
+  private double _expectedCovDoubleFloat;
+  private double _expectedCovLongFloat;
+
+  private double _expectedCovWithFilter;
+
+  private CovarianceTuple[] _expectedGroupByResultVer1 = new 
CovarianceTuple[NUM_GROUPS];
+  private CovarianceTuple[] _expectedGroupByResultVer2 = new 
CovarianceTuple[NUM_GROUPS];
+  private double[] _expectedFinalResultVer1 = new double[NUM_GROUPS];
+  private double[] _expectedFinalResultVer2 = new double[NUM_GROUPS];
+
+  @Override
+  protected String getFilter() {
+    // filter out half of the rows based on group id
+    return " WHERE groupByColumn < " + (NUM_GROUPS / 2);
+  }
+
+  @Override
+  protected IndexSegment getIndexSegment() {
+    return _indexSegment;
+  }
+
+  @Override
+  protected List<IndexSegment> getIndexSegments() {
+    return _indexSegments;
+  }
+
+  @BeforeClass
+  public void setUp()
+      throws Exception {
+    FileUtils.deleteDirectory(INDEX_DIR);
+
+    List<GenericRow> records = new ArrayList<>(NUM_RECORDS);
+
+    Random rand = new Random();
+    int[] intColX = rand.ints(NUM_RECORDS, -MAX_VALUE, MAX_VALUE).toArray();
+    int[] intColY = rand.ints(NUM_RECORDS, -MAX_VALUE, MAX_VALUE).toArray();
+    double[] doubleColX = rand.doubles(NUM_RECORDS, -MAX_VALUE, 
MAX_VALUE).toArray();
+    double[] doubleColY = rand.doubles(NUM_RECORDS, -MAX_VALUE, 
MAX_VALUE).toArray();
+    long[] longCol = rand.longs(NUM_RECORDS, -MAX_VALUE, MAX_VALUE).toArray();
+    double[] floatCol = new double[NUM_RECORDS];
+    double[] groupByCol = new double[NUM_RECORDS];
+
+    int groupSize = NUM_RECORDS / NUM_GROUPS;
+    double sumX = 0;
+    double sumY = 0;
+    double sumGroupBy = 0;
+    double sumXY = 0;
+    double sumXGroupBy = 0;
+    int groupByVal = 0;
+
+    for (int i = 0; i < NUM_RECORDS; i++) {
+      GenericRow record = new GenericRow();
+      int intX = intColX[i];
+      int intY = intColY[i];
+      double doubleX = doubleColX[i];
+      double doubleY = doubleColY[i];
+      long longVal = longCol[i];
+      float floatVal = -MAX_VALUE + rand.nextFloat() * 2 * MAX_VALUE;
+
+      // set up inner segment group by results
+      groupByVal = (int) Math.floor(i / groupSize);
+      if (i % groupSize == 0 && groupByVal > 0) {
+        _expectedGroupByResultVer1[groupByVal - 1] = new CovarianceTuple(sumX, 
sumGroupBy, sumXGroupBy, groupSize);
+        _expectedGroupByResultVer2[groupByVal - 1] = new CovarianceTuple(sumX, 
sumY, sumXY, groupSize);
+        sumX = 0;
+        sumY = 0;
+        sumGroupBy = 0;
+        sumXY = 0;
+        sumXGroupBy = 0;
+      }
+
+      sumX += doubleX;
+      sumY += doubleY;
+      sumGroupBy += groupByVal;
+      sumXY += doubleX * doubleY;
+      sumXGroupBy += doubleX * groupByVal;
+
+      floatCol[i] = floatVal;
+      groupByCol[i] = groupByVal;
+
+      // calculate inner segment results
+      _sumIntX += intX;
+      _sumIntY += intY;
+      _sumDoubleX += doubleX;
+      _sumDoubleY += doubleY;
+      _sumLong += longVal;
+      _sumFloat += floatVal;
+      _sumIntXY += intX * intY;
+      _sumDoubleXY += doubleX * doubleY;
+      _sumIntDouble += intX * doubleX;
+      _sumIntLong += intX * longVal;
+      _sumIntFloat += intX * floatCol[i];
+      _sumDoubleLong += doubleX * longVal;
+      _sumDoubleFloat += doubleX * floatCol[i];
+      _sumLongFloat += longVal * floatCol[i];
+
+      record.putValue(INT_COLUMN_X, intX);
+      record.putValue(INT_COLUMN_Y, intY);
+      record.putValue(DOUBLE_COLUMN_X, doubleX);
+      record.putValue(DOUBLE_COLUMN_Y, doubleY);
+      record.putValue(LONG_COLUMN, longVal);
+      record.putValue(FLOAT_COLUMN, floatVal);
+      record.putValue(GROUP_BY_COLUMN, groupByVal);
+      records.add(record);
+    }
+    _expectedGroupByResultVer1[groupByVal] = new CovarianceTuple(sumX, 
sumGroupBy, sumXGroupBy, groupSize);
+    _expectedGroupByResultVer2[groupByVal] = new CovarianceTuple(sumX, sumY, 
sumXY, groupSize);
+
+    // calculate inter segment result
+    Covariance cov = new Covariance();
+    double[] newIntColX = Arrays.stream(intColX).asDoubleStream().toArray();
+    double[] newIntColY = Arrays.stream(intColY).asDoubleStream().toArray();
+    double[] newLongCol = Arrays.stream(longCol).asDoubleStream().toArray();
+    _expectedCovIntXY = cov.covariance(newIntColX, newIntColY, false);
+    _expectedCovDoubleXY = cov.covariance(doubleColX, doubleColY, false);
+    _expectedCovIntDouble = cov.covariance(newIntColX, doubleColX, false);
+    _expectedCovIntLong = cov.covariance(newIntColX, newLongCol, false);
+    _expectedCovIntFloat = cov.covariance(newIntColX, floatCol, false);
+    _expectedCovDoubleLong = cov.covariance(doubleColX, newLongCol, false);
+    _expectedCovDoubleFloat = cov.covariance(doubleColX, floatCol, false);
+    _expectedCovLongFloat = cov.covariance(newLongCol, floatCol, false);
+
+    double[] filteredX = Arrays.copyOfRange(doubleColX, 0, NUM_RECORDS / 2);
+    double[] filteredY = Arrays.copyOfRange(doubleColY, 0, NUM_RECORDS / 2);
+    _expectedCovWithFilter = cov.covariance(filteredX, filteredY, false);
+
+    // calculate inter segment group by results
+    for (int i = 0; i < NUM_GROUPS; i++) {
+      double[] colX = Arrays.copyOfRange(doubleColX, i * groupSize, (i + 1) * 
groupSize);
+      double[] colGroupBy = Arrays.copyOfRange(groupByCol, i * groupSize, (i + 
1) * groupSize);
+      double[] colY = Arrays.copyOfRange(doubleColY, i * groupSize, (i + 1) * 
groupSize);
+      _expectedFinalResultVer1[i] = cov.covariance(colX, colGroupBy, false);
+      _expectedFinalResultVer2[i] = cov.covariance(colX, colY, false);
+    }
+
+    // generate testSegment
+    ImmutableSegment immutableSegment = setUpSingleSegment(records, 
SEGMENT_NAME);
+    _indexSegment = immutableSegment;
+    _indexSegments = Arrays.asList(immutableSegment, immutableSegment);
+
+    // divide testSegment into 4 distinct segments for distinct inter segment 
tests
+    // by doing so, we can avoid calculating global covariance again
+    _instances = new ArrayList<>();

Review Comment:
   Done - added getDistinctInstances and changed the logic path to trigger 
getBrokerResponseDistinctInstances if there's more than one instance found.



##########
pinot-core/src/test/java/org/apache/pinot/queries/BaseQueriesTest.java:
##########
@@ -91,7 +95,9 @@ protected <T extends Operator> T getOperatorWithFilter(String 
query) {
   /**
    * Run query on multiple index segments.
    * <p>Use this to test the whole flow from server to broker.
-   * <p>The result should be equivalent to querying 4 identical index segments.
+   * <p>Unless explicitly override getDistinctInstances or initialize 2 
distinct index segments in test, the result

Review Comment:
   Done.



##########
pinot-segment-local/src/main/java/org/apache/pinot/segment/local/customobject/CovarianceTuple.java:
##########
@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.pinot.segment.local.customobject;
+
+import java.nio.ByteBuffer;
+import javax.annotation.Nonnull;
+
+
+/**
+ * Intermediate state used by CovarianceAggregationFunction which helps 
calculate
+ * population covariance and sample covariance
+ */
+public class CovarianceTuple implements Comparable<CovarianceTuple> {
+
+  private double _sumX;
+  private double _sumY;
+  private double _sumXY;
+  private long _count;
+
+  public CovarianceTuple(double sumX, double sumY, double sumXY, long count) {
+    _sumX = sumX;
+    _sumY = sumY;
+    _sumXY = sumXY;
+    _count = count;
+  }
+
+  public void apply(double sumX, double sumY, double sumXY, long count) {
+    _sumX += sumX;
+    _sumY += sumY;
+    _sumXY += sumXY;
+    _count += count;
+  }
+
+  public void apply(@Nonnull CovarianceTuple covarianceTuple) {
+    _sumX += covarianceTuple._sumX;
+    _sumY += covarianceTuple._sumY;
+    _sumXY += covarianceTuple._sumXY;
+    _count += covarianceTuple._count;
+  }
+
+  public double getSumX() {
+    return _sumX;
+  }
+
+  public double getSumY() {
+    return _sumY;
+  }
+
+  public double getSumXY() {
+    return _sumXY;
+  }
+
+  public long getCount() {
+    return _count;
+  }
+
+  @Nonnull
+  public byte[] toBytes() {
+    ByteBuffer byteBuffer = ByteBuffer.allocate(Double.BYTES + Double.BYTES + 
Double.BYTES + Long.BYTES);
+    byteBuffer.putDouble(_sumX);
+    byteBuffer.putDouble(_sumY);
+    byteBuffer.putDouble(_sumXY);
+    byteBuffer.putLong(_count);
+    return byteBuffer.array();
+  }
+
+  @Nonnull
+  public static CovarianceTuple fromBytes(byte[] bytes) {
+    return fromByteBuffer(ByteBuffer.wrap(bytes));
+  }
+
+  @Nonnull
+  public static CovarianceTuple fromByteBuffer(ByteBuffer byteBuffer) {
+    return new CovarianceTuple(byteBuffer.getDouble(), byteBuffer.getDouble(), 
byteBuffer.getDouble(),
+        byteBuffer.getLong());
+  }
+
+  @Override
+  public int compareTo(@Nonnull CovarianceTuple covarianceTuple) {

Review Comment:
   Thank you for pointing it out. For context, I was trying to keep the 
return value (-1/1/0) consistent with other custom objects such as 
[AvgPair](https://github.com/apache/pinot/blob/master/pinot-segment-local/src/main/java/org/apache/pinot/segment/local/customobject/AvgPair.java#L71).



##########
pinot-core/src/main/java/org/apache/pinot/core/query/aggregation/function/CovarianceAggregationFunction.java:
##########
@@ -0,0 +1,225 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.pinot.core.query.aggregation.function;
+
+import com.google.common.base.Preconditions;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import org.apache.pinot.common.request.context.ExpressionContext;
+import org.apache.pinot.common.utils.DataSchema;
+import org.apache.pinot.core.common.BlockValSet;
+import org.apache.pinot.core.query.aggregation.AggregationResultHolder;
+import org.apache.pinot.core.query.aggregation.ObjectAggregationResultHolder;
+import org.apache.pinot.core.query.aggregation.groupby.GroupByResultHolder;
+import 
org.apache.pinot.core.query.aggregation.groupby.ObjectGroupByResultHolder;
+import org.apache.pinot.segment.local.customobject.CovarianceTuple;
+import org.apache.pinot.segment.spi.AggregationFunctionType;
+
+
+/**
+ * Aggregation function which returns the population covariance of 2 
expressions.
+ * COVAR_POP(exp1, exp2) = mean(exp1 * exp2) - mean(exp1) * mean(exp2)
+ * COVAR_SAMP(exp1, exp2) = (sum(exp1 * exp2) - sum(exp1) * sum(exp2) / count) / (count - 1)
+ */
+public class CovarianceAggregationFunction implements 
AggregationFunction<CovarianceTuple, Double> {
+  private static final double DEFAULT_FINAL_RESULT = Double.NEGATIVE_INFINITY;
+  protected final ExpressionContext _expression1;
+  protected final ExpressionContext _expression2;
+  protected final boolean _isSample;
+
+  public CovarianceAggregationFunction(List<ExpressionContext> arguments, 
boolean isSample) {
+    _expression1 = arguments.get(0);
+    _expression2 = arguments.get(1);
+    _isSample = isSample;
+  }
+
+  @Override
+  public AggregationFunctionType getType() {
+    return AggregationFunctionType.COVARPOP;
+  }
+
+  @Override
+  public String getColumnName() {
+    return getType().getName() + "_" + _expression1 + "_" + _expression2;
+  }
+
+  @Override
+  public String getResultColumnName() {
+    return getType().getName().toLowerCase() + "(" + _expression1 + "," + 
_expression2 + ")";
+  }
+
+  @Override
+  public List<ExpressionContext> getInputExpressions() {
+    ArrayList<ExpressionContext> inputExpressions = new ArrayList<>();
+    inputExpressions.add(_expression1);
+    inputExpressions.add(_expression2);
+    return inputExpressions;
+  }
+
  @Override
  public AggregationResultHolder createAggregationResultHolder() {
    // Intermediate state is a CovarianceTuple object, so use the object holder.
    return new ObjectAggregationResultHolder();
  }

  @Override
  public GroupByResultHolder createGroupByResultHolder(int initialCapacity, int maxCapacity) {
    // One CovarianceTuple per group key.
    return new ObjectGroupByResultHolder(initialCapacity, maxCapacity);
  }
+
+  @Override
+  public void aggregate(int length, AggregationResultHolder 
aggregationResultHolder,
+      Map<ExpressionContext, BlockValSet> blockValSetMap) {
+    double[] values1 = getValSet(blockValSetMap, _expression1);
+    double[] values2 = getValSet(blockValSetMap, _expression2);
+
+    double sumX = 0.0;
+    double sumY = 0.0;
+    double sumXY = 0.0;
+
+    for (int i = 0; i < length; i++) {
+      sumX += values1[i];
+      sumY += values2[i];
+      sumXY += values1[i] * values2[i];
+    }
+    setAggregationResult(aggregationResultHolder, sumX, sumY, sumXY, length);
+  }
+
+  protected void setAggregationResult(AggregationResultHolder 
aggregationResultHolder, double sumX, double sumY,
+      double sumXY, long count) {
+    CovarianceTuple covarianceTuple = aggregationResultHolder.getResult();
+    if (covarianceTuple == null) {
+      aggregationResultHolder.setValue(new CovarianceTuple(sumX, sumY, sumXY, 
count));
+    } else {
+      covarianceTuple.apply(sumX, sumY, sumXY, count);
+    }
+  }
+
+  protected void setGroupByResult(int groupKey, GroupByResultHolder 
groupByResultHolder, double sumX, double sumY,
+      double sumXY, long count) {
+    CovarianceTuple covarianceTuple = groupByResultHolder.getResult(groupKey);
+    if (covarianceTuple == null) {
+      groupByResultHolder.setValueForKey(groupKey, new CovarianceTuple(sumX, 
sumY, sumXY, count));
+    } else {
+      covarianceTuple.apply(sumX, sumY, sumXY, count);
+    }
+  }
+
+  private double[] getValSet(Map<ExpressionContext, BlockValSet> 
blockValSetMap, ExpressionContext expression) {
+    BlockValSet blockValSet = blockValSetMap.get(expression);
+    //TODO: Add MV support for covariance
+    Preconditions.checkState(blockValSet.isSingleValue(),
+        "Covariance function currently only supports single-valued column");
+    switch (blockValSet.getValueType().getStoredType()) {
+      case INT:
+      case LONG:
+      case FLOAT:
+      case DOUBLE:
+        return blockValSet.getDoubleValuesSV();
+      default:
+        throw new IllegalStateException(
+            "Cannot compute covariance for non-numeric type: " + 
blockValSet.getValueType());
+    }
+  }
+
+  @Override
+  public void aggregateGroupBySV(int length, int[] groupKeyArray, 
GroupByResultHolder groupByResultHolder,
+      Map<ExpressionContext, BlockValSet> blockValSetMap) {
+    double[] values1 = getValSet(blockValSetMap, _expression1);
+    double[] values2 = getValSet(blockValSetMap, _expression2);
+    for (int i = 0; i < length; i++) {
+      setGroupByResult(groupKeyArray[i], groupByResultHolder, values1[i], 
values2[i], values1[i] * values2[i], 1L);
+    }
+  }
+
+  @Override
+  public void aggregateGroupByMV(int length, int[][] groupKeysArray, 
GroupByResultHolder groupByResultHolder,
+      Map<ExpressionContext, BlockValSet> blockValSetMap) {
+    double[] values1 = getValSet(blockValSetMap, _expression1);
+    double[] values2 = getValSet(blockValSetMap, _expression2);
+    for (int i = 0; i < length; i++) {
+      for (int groupKey : groupKeysArray[i]) {
+        setGroupByResult(groupKey, groupByResultHolder, values1[i], 
values2[i], values1[i] * values2[i], 1L);
+      }
+    }
+  }
+
+  @Override
+  public CovarianceTuple extractAggregationResult(AggregationResultHolder 
aggregationResultHolder) {
+    CovarianceTuple covarianceTuple = aggregationResultHolder.getResult();
+    if (covarianceTuple == null) {
+      return new CovarianceTuple(0.0, 0.0, 0.0, 0L);
+    } else {
+      return covarianceTuple;
+    }
+  }
+
+  @Override
+  public CovarianceTuple extractGroupByResult(GroupByResultHolder 
groupByResultHolder, int groupKey) {
+    return groupByResultHolder.getResult(groupKey);
+  }
+
+  @Override
+  public CovarianceTuple merge(CovarianceTuple intermediateResult1, 
CovarianceTuple intermediateResult2) {
+    intermediateResult1.apply(intermediateResult2);
+    return intermediateResult1;
+  }
+
+  @Override
+  public DataSchema.ColumnDataType getIntermediateResultColumnType() {
+    return DataSchema.ColumnDataType.OBJECT;
+  }
+
  @Override
  public DataSchema.ColumnDataType getFinalResultColumnType() {
    // The final covariance value is a plain double.
    return DataSchema.ColumnDataType.DOUBLE;
  }
+
+  @Override
+  public Double extractFinalResult(CovarianceTuple covarianceTuple) {
+    long count = covarianceTuple.getCount();
+    if (count == 0L) {
+      return DEFAULT_FINAL_RESULT;
+    } else {
+      double sumX = covarianceTuple.getSumX();

Review Comment:
   Done



##########
pinot-core/src/main/java/org/apache/pinot/core/query/aggregation/function/CovarianceAggregationFunction.java:
##########
@@ -0,0 +1,237 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.pinot.core.query.aggregation.function;
+
+import com.google.common.base.Preconditions;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import org.apache.pinot.common.request.context.ExpressionContext;
+import org.apache.pinot.common.utils.DataSchema;
+import org.apache.pinot.core.common.BlockValSet;
+import org.apache.pinot.core.query.aggregation.AggregationResultHolder;
+import org.apache.pinot.core.query.aggregation.ObjectAggregationResultHolder;
+import org.apache.pinot.core.query.aggregation.groupby.GroupByResultHolder;
+import 
org.apache.pinot.core.query.aggregation.groupby.ObjectGroupByResultHolder;
+import org.apache.pinot.segment.local.customobject.CovarianceTuple;
+import org.apache.pinot.segment.spi.AggregationFunctionType;
+
+
+/**
+ * Aggregation function which returns the population covariance of 2 
expressions.
+ * COVAR_POP(exp1, exp2) = mean(exp1 * exp2) - mean(exp1) * mean(exp2)
+ * COVAR_SAMP(exp1, exp2) = (sum(exp1 * exp2) - sum(exp1) * sum(exp2) / count) / (count - 1)
+ *
+ * Population covariance between two random variables X and Y is defined as 
either
+ * covarPop(X,Y) = E[(X - E[X]) * (Y - E[Y])] or
+ * covarPop(X,Y) = E[X*Y] - E[X] * E[Y],
+ * here E[X] represents mean of X
+ * @see <a href="https://en.wikipedia.org/wiki/Covariance";>Covariance</a>
+ * The calculations here are based on the second definition shown above.
+ * Sample covariance = covarPop(X, Y) * besselCorrection
+ * @see <a href="https://en.wikipedia.org/wiki/Bessel%27s_correction";>Bessel's 
correction</a>
+ */
+public class CovarianceAggregationFunction implements 
AggregationFunction<CovarianceTuple, Double> {
+  private static final double DEFAULT_FINAL_RESULT = Double.NEGATIVE_INFINITY;
+  protected final ExpressionContext _expression1;
+  protected final ExpressionContext _expression2;
+  protected final boolean _isSample;
+
+  public CovarianceAggregationFunction(List<ExpressionContext> arguments, 
boolean isSample) {
+    _expression1 = arguments.get(0);
+    _expression2 = arguments.get(1);
+    _isSample = isSample;
+  }
+
+  @Override
+  public AggregationFunctionType getType() {
+    if (_isSample) {
+      return AggregationFunctionType.COVARSAMP;
+    }
+    return AggregationFunctionType.COVARPOP;
+  }
+
+  @Override
+  public String getColumnName() {
+    return getType().getName() + "_" + _expression1 + "_" + _expression2;
+  }
+
+  @Override
+  public String getResultColumnName() {
+    return getType().getName().toLowerCase() + "(" + _expression1 + "," + 
_expression2 + ")";
+  }
+
+  @Override
+  public List<ExpressionContext> getInputExpressions() {
+    ArrayList<ExpressionContext> inputExpressions = new ArrayList<>();
+    inputExpressions.add(_expression1);
+    inputExpressions.add(_expression2);
+    return inputExpressions;
+  }
+
  @Override
  public AggregationResultHolder createAggregationResultHolder() {
    // Intermediate state is a CovarianceTuple object, so use the object holder.
    return new ObjectAggregationResultHolder();
  }

  @Override
  public GroupByResultHolder createGroupByResultHolder(int initialCapacity, int maxCapacity) {
    // One CovarianceTuple per group key.
    return new ObjectGroupByResultHolder(initialCapacity, maxCapacity);
  }
+
+  @Override
+  public void aggregate(int length, AggregationResultHolder 
aggregationResultHolder,
+      Map<ExpressionContext, BlockValSet> blockValSetMap) {
+    double[] values1 = getValSet(blockValSetMap, _expression1);
+    double[] values2 = getValSet(blockValSetMap, _expression2);
+
+    double sumX = 0.0;
+    double sumY = 0.0;
+    double sumXY = 0.0;
+
+    for (int i = 0; i < length; i++) {
+      sumX += values1[i];
+      sumY += values2[i];
+      sumXY += values1[i] * values2[i];
+    }
+    setAggregationResult(aggregationResultHolder, sumX, sumY, sumXY, length);
+  }
+
+  protected void setAggregationResult(AggregationResultHolder 
aggregationResultHolder, double sumX, double sumY,
+      double sumXY, long count) {
+    CovarianceTuple covarianceTuple = aggregationResultHolder.getResult();
+    if (covarianceTuple == null) {
+      aggregationResultHolder.setValue(new CovarianceTuple(sumX, sumY, sumXY, 
count));
+    } else {
+      covarianceTuple.apply(sumX, sumY, sumXY, count);
+    }
+  }
+
+  protected void setGroupByResult(int groupKey, GroupByResultHolder 
groupByResultHolder, double sumX, double sumY,
+      double sumXY, long count) {
+    CovarianceTuple covarianceTuple = groupByResultHolder.getResult(groupKey);
+    if (covarianceTuple == null) {
+      groupByResultHolder.setValueForKey(groupKey, new CovarianceTuple(sumX, 
sumY, sumXY, count));
+    } else {
+      covarianceTuple.apply(sumX, sumY, sumXY, count);
+    }
+  }
+
+  private double[] getValSet(Map<ExpressionContext, BlockValSet> 
blockValSetMap, ExpressionContext expression) {
+    BlockValSet blockValSet = blockValSetMap.get(expression);
+    //TODO: Add MV support for covariance
+    Preconditions.checkState(blockValSet.isSingleValue(),
+        "Covariance function currently only supports single-valued column");
+    switch (blockValSet.getValueType().getStoredType()) {
+      case INT:
+      case LONG:
+      case FLOAT:
+      case DOUBLE:
+        return blockValSet.getDoubleValuesSV();
+      default:
+        throw new IllegalStateException(
+            "Cannot compute covariance for non-numeric type: " + 
blockValSet.getValueType());
+    }
+  }
+
+  @Override
+  public void aggregateGroupBySV(int length, int[] groupKeyArray, 
GroupByResultHolder groupByResultHolder,
+      Map<ExpressionContext, BlockValSet> blockValSetMap) {
+    double[] values1 = getValSet(blockValSetMap, _expression1);
+    double[] values2 = getValSet(blockValSetMap, _expression2);
+    for (int i = 0; i < length; i++) {
+      setGroupByResult(groupKeyArray[i], groupByResultHolder, values1[i], 
values2[i], values1[i] * values2[i], 1L);
+    }
+  }
+
+  @Override
+  public void aggregateGroupByMV(int length, int[][] groupKeysArray, 
GroupByResultHolder groupByResultHolder,
+      Map<ExpressionContext, BlockValSet> blockValSetMap) {
+    double[] values1 = getValSet(blockValSetMap, _expression1);
+    double[] values2 = getValSet(blockValSetMap, _expression2);
+    for (int i = 0; i < length; i++) {
+      for (int groupKey : groupKeysArray[i]) {
+        setGroupByResult(groupKey, groupByResultHolder, values1[i], 
values2[i], values1[i] * values2[i], 1L);
+      }
+    }
+  }
+
+  @Override
+  public CovarianceTuple extractAggregationResult(AggregationResultHolder 
aggregationResultHolder) {
+    CovarianceTuple covarianceTuple = aggregationResultHolder.getResult();
+    if (covarianceTuple == null) {
+      return new CovarianceTuple(0.0, 0.0, 0.0, 0L);
+    } else {
+      return covarianceTuple;
+    }
+  }
+
+  @Override
+  public CovarianceTuple extractGroupByResult(GroupByResultHolder 
groupByResultHolder, int groupKey) {
+    return groupByResultHolder.getResult(groupKey);
+  }
+
+  @Override
+  public CovarianceTuple merge(CovarianceTuple intermediateResult1, 
CovarianceTuple intermediateResult2) {
+    intermediateResult1.apply(intermediateResult2);
+    return intermediateResult1;
+  }
+
+  @Override
+  public DataSchema.ColumnDataType getIntermediateResultColumnType() {
+    return DataSchema.ColumnDataType.OBJECT;
+  }
+
+  @Override
+  public DataSchema.ColumnDataType getFinalResultColumnType() {
+    return DataSchema.ColumnDataType.DOUBLE;
+  }
+
+  @Override
+  public Double extractFinalResult(CovarianceTuple covarianceTuple) {
+    long count = covarianceTuple.getCount();
+    if (count == 0L) {
+      return DEFAULT_FINAL_RESULT;
+    } else {
+      double sumX = covarianceTuple.getSumX();
+      double sumY = covarianceTuple.getSumY();
+      double sumXY = covarianceTuple.getSumXY();
+      double popCov = (sumXY / count) - (sumX / count) * (sumY / count);
+      double besselCorrection = count / (count - 1);
+      if (_isSample) {
+        return popCov * besselCorrection;
+      }
+      return popCov;
+    }

Review Comment:
   I see where you are coming from, but
   `[(sumXY / count) - (sumX / count) * (sumY / count)] * [count / (count - 1)]` actually is not equal to `(sumXY / (count - 1)) - (sumX / (count - 1)) *
   (sumY / (count - 1))` — a subtle math mistake that I also missed the first
   time. Multiplying by the Bessel correction is both clearer and correct in
   this case.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@pinot.apache.org
For additional commands, e-mail: commits-h...@pinot.apache.org

Reply via email to