gsmiller commented on code in PR #13974:
URL: https://github.com/apache/lucene/pull/13974#discussion_r1929422639


##########
lucene/sandbox/src/java/org/apache/lucene/sandbox/search/SortedSetMultiRangeQuery.java:
##########
@@ -0,0 +1,300 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.sandbox.search;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import org.apache.lucene.document.SortedSetDocValuesField;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.DocValuesSkipper;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.ConstantScoreScorer;
+import org.apache.lucene.search.ConstantScoreWeight;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.search.DocValuesRangeIterator;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryVisitor;
+import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.ScorerSupplier;
+import org.apache.lucene.search.TwoPhaseIterator;
+import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.LongBitSet;
+
+/** A union of multiple ranges over a SortedSetDocValuesField */
+public class SortedSetMultiRangeQuery extends Query {
  private final String field;
  // Width in bytes of every encoded value; all range endpoints must use this width.
  private final int bytesPerDim;
  // Unsigned lexicographic comparator over bytesPerDim-wide byte arrays.
  private final ArrayUtil.ByteArrayComparator comparator;
  // Inclusive [lower, upper] byte ranges whose union this query matches.
  // NOTE(review): not private/final — mutable package-visible state; consider tightening.
  List<MultiRangeQuery.RangeClause> rangeClauses;

  /**
   * Package-private: instances are created via {@link Builder}, which performs validation.
   *
   * @param name field name
   * @param clauses inclusive byte ranges to union (assumed already validated by the Builder)
   * @param bytes width in bytes of each encoded value
   * @param comparator unsigned comparator matching {@code bytes}
   */
  SortedSetMultiRangeQuery(
      String name,
      List<MultiRangeQuery.RangeClause> clauses,
      int bytes,
      ArrayUtil.ByteArrayComparator comparator) {
    this.field = name;
    this.rangeClauses = clauses;
    this.bytesPerDim = bytes;
    this.comparator = comparator;
  }
+
+  /** Builder for creating a SortedSetMultiRangeQuery. */
+  public static class Builder {
+    private final String name;
+    protected final List<MultiRangeQuery.RangeClause> clauses = new 
ArrayList<>();
+    private final int bytes;
+    private final ArrayUtil.ByteArrayComparator comparator;
+
+    public Builder(String name, int bytes) {
+      this.name = Objects.requireNonNull(name);
+      this.bytes = bytes; // TODO assrt positive
+      this.comparator = ArrayUtil.getUnsignedComparator(bytes);
+    }
+
+    public Builder add(BytesRef lowerValue, BytesRef upperValue) {
+      byte[] low = lowerValue.clone().bytes;
+      byte[] up = upperValue.clone().bytes;
+      if (this.comparator.compare(low, 0, up, 0) > 0) {
+        throw new IllegalArgumentException("lowerValue must be <= upperValue");
+      } else {
+        clauses.add(new MultiRangeQuery.RangeClause(low, up));
+      }
+      return this;
+    }
+
+    public Query build() {
+      if (clauses.isEmpty()) {
+        return new BooleanQuery.Builder().build();
+      }
+      if (clauses.size() == 1) {
+        return SortedSetDocValuesField.newSlowRangeQuery(
+            name,
+            new BytesRef(clauses.getFirst().lowerValue),
+            new BytesRef(clauses.getFirst().upperValue),
+            true,
+            true);
+      }
+      return new SortedSetMultiRangeQuery(name, clauses, this.bytes, 
comparator);
+    }
+  }
+
+  @Override
+  public Query rewrite(IndexSearcher indexSearcher) throws IOException {
+    ArrayList<MultiRangeQuery.RangeClause> sortedClauses = new 
ArrayList<>(this.rangeClauses);
+    sortedClauses.sort(
+        (o1, o2) -> {
+          // if (result == 0) {
+          //    return comparator.compare(o1.upperValue, 0, o2.upperValue, 0);
+          // } else {
+          return comparator.compare(o1.lowerValue, 0, o2.lowerValue, 0);
+          // }
+        });
+    if (!this.rangeClauses.equals(sortedClauses)) {
+      return new SortedSetMultiRangeQuery(
+          this.field, sortedClauses, this.bytesPerDim, this.comparator);
+    } else {
+      return this;
+    }
+  }
+
+  @Override
+  public String toString(String fld) {
+    return "SortedSetMultiRangeQuery{"
+        + "field='"
+        + fld
+        + '\''
+        + ", rangeClauses="
+        + rangeClauses
+        + // TODO better toString
+        '}';
+  }
+
+  // what TODO with reverse ranges ???
+  @Override
+  public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, 
float boost)
+      throws IOException {
+    return new ConstantScoreWeight(this, boost) {
+      @Override
+      public ScorerSupplier scorerSupplier(LeafReaderContext context) throws 
IOException {
+        if (context.reader().getFieldInfos().fieldInfo(field) == null) {
+          return null;
+        }
+        DocValuesSkipper skipper = context.reader().getDocValuesSkipper(field);
+        SortedSetDocValues values = DocValues.getSortedSet(context.reader(), 
field);
+        // implement ScorerSupplier, since we do some expensive stuff to make 
a scorer
+        return new ScorerSupplier() {
+          @Override
+          public Scorer get(long leadCost) throws IOException {
+            if (rangeClauses.isEmpty()) {
+              return empty();
+            }
+            TermsEnum termsEnum = values.termsEnum();
+            LongBitSet matchingOrdsShifted = null;
+            long minOrd = 0, maxOrd = values.getValueCount() - 1;
+            long matchesAbove =
+                values.getValueCount(); // it's last range goes to maxOrd, by 
default - no match
+            long maxSeenOrd = values.getValueCount();
+            TermsEnum.SeekStatus seekStatus = TermsEnum.SeekStatus.NOT_FOUND;
+            for (int r = 0; r < rangeClauses.size(); r++) {
+              MultiRangeQuery.RangeClause range = rangeClauses.get(r);
+              long startingOrd;
+              seekStatus = termsEnum.seekCeil(new BytesRef(range.lowerValue));
+              if (matchingOrdsShifted == null) { // first iter
+                if (seekStatus == TermsEnum.SeekStatus.END) {
+                  return empty(); // no bitset yet, give up
+                }
+                minOrd = termsEnum.ord();
+                if (skipper != null) {
+                  minOrd = Math.max(minOrd, skipper.minValue());
+                  maxOrd = Math.min(maxOrd, skipper.maxValue());
+                }
+                if (maxOrd < minOrd) {
+                  return empty();
+                }
+                startingOrd = minOrd;
+              } else {
+                if (seekStatus == TermsEnum.SeekStatus.END) {
+                  break; // ranges - we are done, terms are exhausted
+                } else {
+                  startingOrd = termsEnum.ord();
+                }
+              }
+              byte[] upper = range.upperValue; // TODO ignore reverse ranges
+              // looking for overlap
+              for (int overlap = r + 1; overlap < rangeClauses.size(); 
overlap++, r++) {
+                MultiRangeQuery.RangeClause mayOverlap = 
rangeClauses.get(overlap);
+                assert comparator.compare(range.lowerValue, 0, 
mayOverlap.lowerValue, 0) <= 0
+                    : "since they are sorted";
+                // TODO it might be contiguous ranges, it's worth to check but 
I have no idea how
+                if (comparator.compare(mayOverlap.lowerValue, 0, upper, 0) <= 
0) {
+                  // overlap, expand if needed
+                  if (comparator.compare(upper, 0, mayOverlap.upperValue, 0) < 
0) {
+                    upper = mayOverlap.upperValue;
+                  }
+                  // continue; // skip overlapping rng
+                } else {
+                  break; // no r++
+                }
+              }
+              seekStatus = termsEnum.seekCeil(new BytesRef(upper));
+
+              if (seekStatus == TermsEnum.SeekStatus.END) {
+                maxSeenOrd = maxOrd; // perhaps it's worth to set for skipper
+                matchesAbove = startingOrd;
+                break; // no need to create bitset
+              }
+              maxSeenOrd =
+                  seekStatus == TermsEnum.SeekStatus.FOUND
+                      ? termsEnum.ord()
+                      : termsEnum.ord() - 1; // floor
+
+              if (matchingOrdsShifted == null) {
+                matchingOrdsShifted = new LongBitSet(maxOrd + 1 - minOrd);
+              }
+              matchingOrdsShifted.set(
+                  startingOrd - minOrd, maxSeenOrd - minOrd + 1); // up is 
exclusive
+            }
+            /// ranges are over, there might be no set!!
+            TwoPhaseIterator iterator;
+            long finalMatchesAbove = matchesAbove;
+            LongBitSet finalMatchingOrdsShifted = matchingOrdsShifted;
+            long finalMinOrd = minOrd;
+            iterator =
+                new TwoPhaseIterator(values) {
+                  // TODO unwrap singleton?
+                  @Override
+                  public boolean matches() throws IOException {
+                    for (int i = 0; i < values.docValueCount(); i++) {
+                      long ord = values.nextOrd();
+                      if (ord >= finalMinOrd
+                          && ((finalMatchesAbove < values.getValueCount()
+                                  && ord >= finalMatchesAbove)
+                              || finalMatchingOrdsShifted.get(ord - 
finalMinOrd))) {

Review Comment:
   >  I think just for some pathological queries. Imagine, many distinct ranges 
covered by the huge one, it may drop covered ranges from the future processing 
[...]
   
   It makes sense to me that we may want to do some optimizations for 
pathological cases with many ranges that overlap. I do wonder a bit on the 
importance of this. My reasoning here is that the cost only hits when creating 
the bitset, which is an upfront cost and not a per-doc cost. I'm not opposed to 
pursuing this, but I'm trying to find a way to balance some of the added code 
complexity. In this case, it looks like you're relying on a couple helper 
classes (`OrdRange` and `Edge`) along with a reasonably complex algorithm for 
finding all the overlaps (which to be perfectly honest, I haven't dug into, 
only glanced at).
   
   > [...] whether it be bitset or a tree set approach
   
   I'm not sure I understand this point completely. I think what you're saying 
is that you might want to follow up this PR with another implementation that uses
a tree set instead of a bitset for matching (I believe you had an
iteration of this PR that introduced the idea). I wonder if a common solution 
for merging overlapping ranges might become more important later if we really 
do have multiple implementations? Maybe this is a candidate for a spin-off 
issue that gets pursued as a later PR? (This goes back to my motivation of 
trying to reduce complexity of this initial change until it becomes more 
obvious we need the added complexity).
   
   > [...] this step won't be available for NumericDVs and other w/o ords
   
   Ah yes, that's fair. I think this is a continued discussion of the last 
point. Maybe we can pursue some merging when we do have multiple 
implementations that could obviously benefit from something like this? I can see an
optimization becoming more important in these other situations since it will
become a per-doc cost as opposed to an up-front setup cost. Again though, I 
wonder if this could be tackled as a spin-off issue once we have more 
implementations?
   
   > createOrdRanges [...]  at here some ranges might become adjacent, not 
really overlapping, but eligible for merging into bigger ones. I checked the 
code and comments there, there's no occurrence of overlap only adjacent.
   
   Are you suggesting that `createOrdRanges` as you currently have it 
implemented would not merge ranges that overlap? It looks to me like it would, 
but I might be missing something. If the ranges are sorted by their `min`, then 
I believe this method will merge away all the overlaps (as well as adjacent) 
wouldn't it? What do you think? Maybe I'm missing some cases or not properly 
wrapping my head around what you've got implemented here?
   
   Apologies if it feels like I'm pushing back on this too much. I'm not 
against pursuing these optimizations by any means, I'm just seeing if there's a 
way to simplify this PR so we can get it merged and follow up with spin-off 
issues. Please let me know what you think (and thanks for your patience!).



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@lucene.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@lucene.apache.org
For additional commands, e-mail: issues-h...@lucene.apache.org

Reply via email to