epotyom commented on code in PR #13568:
URL: https://github.com/apache/lucene/pull/13568#discussion_r1711338640


##########
lucene/sandbox/src/java/org/apache/lucene/sandbox/facet/recorders/LongAggregationsFacetRecorder.java:
##########
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.sandbox.facet.recorders;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.internal.hppc.IntCursor;
+import org.apache.lucene.internal.hppc.IntObjectHashMap;
+import org.apache.lucene.sandbox.facet.abstracts.FacetLeafRecorder;
+import org.apache.lucene.sandbox.facet.abstracts.FacetRecorder;
+import org.apache.lucene.sandbox.facet.abstracts.FacetRollup;
+import org.apache.lucene.sandbox.facet.abstracts.OrdinalIterator;
+import org.apache.lucene.sandbox.facet.abstracts.Reducer;
+import org.apache.lucene.search.LongValues;
+import org.apache.lucene.search.LongValuesSource;
+
+/**
+ * {@link FacetRecorder} that computes multiple long aggregations per facet. 
TODO: [premature
+ * optimization idea] if instead of one array we keep aggregations in two 
LongVector (one for MAX
+ * aggregation and one for SUM) we can benefit from SIMD?
+ */
+public class LongAggregationsFacetRecorder implements FacetRecorder {
+
+  private IntObjectHashMap<long[]> perOrdinalValues;
+  private List<IntObjectHashMap<long[]>> leafValues;
+
+  private final LongValuesSource[] longValuesSources;
+  private final Reducer[] reducers;
+
+  /** Constructor. */
+  public LongAggregationsFacetRecorder(LongValuesSource[] longValuesSources, 
Reducer[] reducers) {
+    assert longValuesSources.length == reducers.length;
+    this.longValuesSources = longValuesSources;
+    this.reducers = reducers;
+    perOrdinalValues = new IntObjectHashMap<>();
+    leafValues = Collections.synchronizedList(new ArrayList<>());
+  }
+
+  @Override
+  public FacetLeafRecorder getLeafRecorder(LeafReaderContext context) throws 
IOException {
+    LongValues[] longValues = new LongValues[longValuesSources.length];
+    for (int i = 0; i < longValuesSources.length; i++) {
+      longValues[i] = longValuesSources[i].getValues(context, null);
+    }
+    IntObjectHashMap<long[]> valuesRecorder = new IntObjectHashMap<>();
+    leafValues.add(valuesRecorder);
+    return new LongAggregationsFacetLeafRecorder(longValues, reducers, 
valuesRecorder);
+  }
+
+  @Override
+  public OrdinalIterator recordedOrds() {
+    if (perOrdinalValues.isEmpty()) {
+      return null;
+    }
+    Iterator<IntCursor> ordIterator = perOrdinalValues.keys().iterator();
+    return new OrdinalIterator() {
+      @Override
+      public int nextOrd() throws IOException {
+        if (ordIterator.hasNext()) {
+          return ordIterator.next().value;
+        } else {
+          return NO_MORE_ORDS;
+        }
+      }
+    };
+  }
+
  @Override
  public boolean isEmpty() {
    // Empty until reduce() has merged the per-leaf results into perOrdinalValues.
    return perOrdinalValues.isEmpty();
  }
+
+  @Override
+  public void reduce(FacetRollup facetRollup) throws IOException {
+    boolean firstElement = true;
+    for (IntObjectHashMap<long[]> leafValue : leafValues) {
+      if (firstElement) {
+        perOrdinalValues = leafValue;
+        firstElement = false;
+      } else {
+        for (IntObjectHashMap.IntObjectCursor<long[]> elem : leafValue) {
+          long[] vals = perOrdinalValues.get(elem.key);
+          if (vals == null) {
+            perOrdinalValues.put(elem.key, elem.value);
+          } else {
+            for (int i = 0; i < longValuesSources.length; i++) {
+              vals[i] = reducers[i].reduce(vals[i], elem.value[i]);
+            }
+          }
+        }
+      }
+    }
+    if (firstElement) {
+      // TODO: do we need empty map by default?
+      perOrdinalValues = new IntObjectHashMap<>();
+    }
+    // TODO: implement rollup
+    if (facetRollup != null
+        && facetRollup.getDimOrdsToRollup().nextOrd() != 
OrdinalIterator.NO_MORE_ORDS) {
+      throw new UnsupportedOperationException("Rollup is required, but not 
implemented");
+    }
+  }
+
+  public long getRecordedValue(int ord, int valuesId) {
+    if (valuesId < 0 || valuesId >= longValuesSources.length) {
+      throw new IllegalArgumentException("Invalid request for ordinal values");
+    }
+    long[] valuesForOrd = perOrdinalValues.get(ord);
+    if (valuesForOrd != null) {
+      return valuesForOrd[valuesId];
+    }
+    return -1; // TODO: missing value, what do we want to return? Zero might 
be a better option.

Review Comment:
   I gave it a second thought, and changed it to return `0` by default with the 
following explanation, please let me know what you think.
   
   ```
    // There are a few options for what we can return here, e.g. throw an 
exception, return a
    // hardcoded or provided default value. It might make for a better API to do 
that instead of
    // returning zero, but
       // there are two reasons why I think returning zero is the right 
compromise:
        // 1) recorder result is a map-like structure, and maps in Java usually 
return a default
        // value, e.g. null or 0, rather than throwing an exception when a key 
is missing.
       // 2) Handling correctly all missing value cases might be expensive, 
e.g. what if only one
       // aggregation for selected facet ordinal is missing, i.e. no docs that 
belong to this facet
       // ordinal have a value to aggregate? To handle that we would have to 
maintain missing values
       // during collection instead of using default array value - zero. I 
believe it is excessive and
       // most users are not going to use it anyway. Worst case scenario, we 
can add another public get
       // method that handles missing values later.
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@lucene.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@lucene.apache.org
For additional commands, e-mail: issues-h...@lucene.apache.org

Reply via email to