s1monw commented on code in PR #12622:
URL: https://github.com/apache/lucene/pull/12622#discussion_r1400320980


##########
lucene/core/src/java/org/apache/lucene/index/IndexWriter.java:
##########
@@ -3475,6 +3475,8 @@ public void addIndexesReaderMerge(MergePolicy.OneMerge 
merge) throws IOException
       merge.getMergeInfo().info.setUseCompoundFile(true);
     }
 
+    merge.setMergeInfo(merge.info);

Review Comment:
   Do we have a test that caught that merge.info was missing?



##########
lucene/core/src/java/org/apache/lucene/index/IndexWriter.java:
##########
@@ -5160,20 +5177,74 @@ public int length() {
         }
         mergeReaders.add(wrappedReader);
       }
+
+      MergeState.DocMap[] reorderDocMaps = null;
+      if (config.getIndexSort() == null) {
+        // Create a merged view of the input segments. This effectively does 
the merge.
+        CodecReader mergedView = 
SlowCompositeCodecReaderWrapper.wrap(mergeReaders);
+        Sorter.DocMap docMap = merge.reorder(mergedView, directory);
+        if (docMap != null) {
+          reorderDocMaps = new MergeState.DocMap[mergeReaders.size()];
+          int docBase = 0;
+          int i = 0;
+          for (CodecReader reader : mergeReaders) {
+            final int currentDocBase = docBase;
+            reorderDocMaps[i] =
+                new MergeState.DocMap() {

Review Comment:
   Maybe we can make DocMap an interface and then just use a lambda here and in 
other places; it would look much nicer. I can also do that in a different PR 
after this is in.



##########
lucene/core/src/java/org/apache/lucene/index/MergePolicy.java:
##########
@@ -385,11 +385,7 @@ public long totalBytesSize() {
      * not indicate the number of documents after the merge.
      */
     public int totalNumDocs() {
-      int total = 0;
-      for (SegmentCommitInfo info : segments) {
-        total += info.info.maxDoc();
-      }
-      return total;
+      return totalMaxDoc;

Review Comment:
   <3



##########
lucene/misc/src/java/org/apache/lucene/misc/index/BPReorderingMergePolicy.java:
##########
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.misc.index;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import org.apache.lucene.index.CodecReader;
+import org.apache.lucene.index.FilterMergePolicy;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.MergeTrigger;
+import org.apache.lucene.index.SegmentCommitInfo;
+import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.Sorter;
+import org.apache.lucene.misc.index.BPIndexReorderer.NotEnoughRAMException;
+import org.apache.lucene.store.Directory;
+
+/**
+ * A merge policy that reorders merged segments according to a {@link 
BPIndexReorderer}. When
+ * reordering doesn't have enough RAM, it simply skips reordering in order not 
to fail the merge. So
+ * make sure to give enough RAM to your {@link BPIndexReorderer} via {@link
+ * BPIndexReorderer#setRAMBudgetMB(double)}.
+ */
+public final class BPReorderingMergePolicy extends FilterMergePolicy {
+
+  /** Whether a segment has been reordered. */
+  static final String REORDERED = "bp.reordered";
+
+  private final BPIndexReorderer reorderer;
+  private int minNaturalMergeNumDocs = 1;
+  private float minNaturalMergeRatioFromBiggestSegment = 0f;
+
+  /**
+   * Sole constructor. It takes the merge policy that should be used to 
compute merges, and will
+   * then reorder doc IDs from all merges above the configured minimum doc 
count, as well as all
+   * forced merges.
+   *
+   * <p>If you wish to only run reordering upon forced merges, pass {@link 
Integer#MAX_VALUE} as a
+   * {@code minNaturalMergeNumDocs}. Otherwise a default value of {@code 2^18 
= 262,144} is
+   * suggested. This should help retain merging optimizations on small merges 
while reordering the
+   * larger segments that are important for good search performance.
+   *
+   * @param in the merge policy to use to compute merges
+   * @param reorderer the {@link BPIndexReorderer} to use to renumber doc IDs
+   */
+  public BPReorderingMergePolicy(MergePolicy in, BPIndexReorderer reorderer) {
+    super(in);
+    this.reorderer = reorderer;
+  }
+
+  /**
+   * Set the minimum number of docs that a merge must have for the resulting 
segment to be
+   * reordered.
+   */
+  public void setMinNaturalMergeNumDocs(int minNaturalMergeNumDocs) {
+    if (minNaturalMergeNumDocs < 1) {
+      throw new IllegalArgumentException(
+          "minNaturalMergeNumDocs must be at least 1, got " + 
minNaturalMergeNumDocs);
+    }
+    this.minNaturalMergeNumDocs = minNaturalMergeNumDocs;
+  }
+
+  /**
+   * Set the minimum number of docs that a merge must have for the resulting 
segment to be
+   * reordered, as a ratio of the total number of documents of the current 
biggest segment in the
+   * index. This parameter helps only enable reordering on segments that are 
large enough that they
+   * will significantly contribute to overall search performance.
+   */
+  public void setMinNaturalMergeRatioFromBiggestSegment(
+      float minNaturalMergeRatioFromBiggestSegment) {
+    if (minNaturalMergeRatioFromBiggestSegment >= 0 == false
+        || minNaturalMergeRatioFromBiggestSegment < 1 == false) {
+      throw new IllegalArgumentException(
+          "minNaturalMergeRatioFromBiggestSegment must be in [0, 1), got "
+              + minNaturalMergeRatioFromBiggestSegment);
+    }
+    this.minNaturalMergeRatioFromBiggestSegment = 
minNaturalMergeRatioFromBiggestSegment;
+  }
+
+  private MergeSpecification maybeReorder(
+      MergeSpecification spec, boolean forced, SegmentInfos infos) {
+    if (spec == null) {
+      return null;
+    }
+
+    final int minNumDocs;
+    if (forced) {
+      // No minimum size for forced merges
+      minNumDocs = 1;
+    } else {
+      int maxMaxDoc = 0;
+      if (infos != null) {
+        for (SegmentCommitInfo sci : infos) {
+          maxMaxDoc = Math.max(sci.info.maxDoc(), maxMaxDoc);
+        }
+      }
+      minNumDocs =
+          Math.max(
+              this.minNaturalMergeNumDocs,
+              (int) ((double) minNaturalMergeRatioFromBiggestSegment * 
maxMaxDoc));
+    }
+
+    MergeSpecification newSpec = new MergeSpecification();
+    for (OneMerge oneMerge : spec.merges) {
+
+      newSpec.add(
+          new OneMerge(oneMerge) {
+
+            private boolean reordered = false;
+
+            @Override
+            public CodecReader wrapForMerge(CodecReader reader) throws 
IOException {
+              return oneMerge.wrapForMerge(reader);
+            }
+
+            @Override
+            public Sorter.DocMap reorder(CodecReader reader, Directory dir) 
throws IOException {
+              if (reader.numDocs() < minNumDocs) {

Review Comment:
   I guess it's a matter of style, but can we maybe have only a single return 
statement here?



##########
lucene/misc/src/java/org/apache/lucene/misc/index/BPReorderingMergePolicy.java:
##########
@@ -112,8 +116,12 @@ private MergeSpecification maybeReorder(
 
     MergeSpecification newSpec = new MergeSpecification();
     for (OneMerge oneMerge : spec.merges) {
+
       newSpec.add(
           new OneMerge(oneMerge) {
+
+            private boolean reordered = false;

Review Comment:
   Maybe make that a `SetOnce`?



##########
lucene/misc/src/java/org/apache/lucene/misc/index/BPReorderingMergePolicy.java:
##########
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.misc.index;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import org.apache.lucene.index.CodecReader;
+import org.apache.lucene.index.FilterMergePolicy;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.MergeTrigger;
+import org.apache.lucene.index.SegmentCommitInfo;
+import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.Sorter;
+import org.apache.lucene.misc.index.BPIndexReorderer.NotEnoughRAMException;
+import org.apache.lucene.store.Directory;
+
+/**
+ * A merge policy that reorders merged segments according to a {@link 
BPIndexReorderer}. When
+ * reordering doesn't have enough RAM, it simply skips reordering in order not 
to fail the merge. So
+ * make sure to give enough RAM to your {@link BPIndexReorderer} via {@link
+ * BPIndexReorderer#setRAMBudgetMB(double)}.
+ */
+public final class BPReorderingMergePolicy extends FilterMergePolicy {
+
+  /** Whether a segment has been reordered. */
+  static final String REORDERED = "bp.reordered";
+
+  private final BPIndexReorderer reorderer;
+  private int minNaturalMergeNumDocs = 1;
+  private float minNaturalMergeRatioFromBiggestSegment = 0f;
+
+  /**
+   * Sole constructor. It takes the merge policy that should be used to 
compute merges, and will
+   * then reorder doc IDs from all merges above the configured minimum doc 
count, as well as all
+   * forced merges.
+   *
+   * <p>If you wish to only run reordering upon forced merges, pass {@link 
Integer#MAX_VALUE} as a
+   * {@code minNaturalMergeNumDocs}. Otherwise a default value of {@code 2^18 
= 262,144} is
+   * suggested. This should help retain merging optimizations on small merges 
while reordering the
+   * larger segments that are important for good search performance.
+   *
+   * @param in the merge policy to use to compute merges
+   * @param reorderer the {@link BPIndexReorderer} to use to renumber doc IDs
+   */
+  public BPReorderingMergePolicy(MergePolicy in, BPIndexReorderer reorderer) {
+    super(in);
+    this.reorderer = reorderer;
+  }
+
+  /**
+   * Set the minimum number of docs that a merge must have for the resulting 
segment to be
+   * reordered.
+   */
+  public void setMinNaturalMergeNumDocs(int minNaturalMergeNumDocs) {
+    if (minNaturalMergeNumDocs < 1) {
+      throw new IllegalArgumentException(
+          "minNaturalMergeNumDocs must be at least 1, got " + 
minNaturalMergeNumDocs);
+    }
+    this.minNaturalMergeNumDocs = minNaturalMergeNumDocs;
+  }
+
+  /**
+   * Set the minimum number of docs that a merge must have for the resulting 
segment to be
+   * reordered, as a ratio of the total number of documents of the current 
biggest segment in the
+   * index. This parameter helps only enable reordering on segments that are 
large enough that they
+   * will significantly contribute to overall search performance.
+   */
+  public void setMinNaturalMergeRatioFromBiggestSegment(
+      float minNaturalMergeRatioFromBiggestSegment) {
+    if (minNaturalMergeRatioFromBiggestSegment >= 0 == false
+        || minNaturalMergeRatioFromBiggestSegment < 1 == false) {
+      throw new IllegalArgumentException(
+          "minNaturalMergeRatioFromBiggestSegment must be in [0, 1), got "
+              + minNaturalMergeRatioFromBiggestSegment);
+    }
+    this.minNaturalMergeRatioFromBiggestSegment = 
minNaturalMergeRatioFromBiggestSegment;
+  }
+
+  private MergeSpecification maybeReorder(
+      MergeSpecification spec, boolean forced, SegmentInfos infos) {
+    if (spec == null) {
+      return null;
+    }
+
+    final int minNumDocs;
+    if (forced) {
+      // No minimum size for forced merges
+      minNumDocs = 1;
+    } else {
+      int maxMaxDoc = 0;
+      if (infos != null) {
+        for (SegmentCommitInfo sci : infos) {
+          maxMaxDoc = Math.max(sci.info.maxDoc(), maxMaxDoc);
+        }
+      }
+      minNumDocs =
+          Math.max(
+              this.minNaturalMergeNumDocs,
+              (int) ((double) minNaturalMergeRatioFromBiggestSegment * 
maxMaxDoc));
+    }
+
+    MergeSpecification newSpec = new MergeSpecification();
+    for (OneMerge oneMerge : spec.merges) {
+
+      newSpec.add(
+          new OneMerge(oneMerge) {
+
+            private boolean reordered = false;
+
+            @Override
+            public CodecReader wrapForMerge(CodecReader reader) throws 
IOException {
+              return oneMerge.wrapForMerge(reader);
+            }
+
+            @Override
+            public Sorter.DocMap reorder(CodecReader reader, Directory dir) 
throws IOException {
+              if (reader.numDocs() < minNumDocs) {
+                return null;
+              }
+
+              try {
+                Sorter.DocMap docMap = reorderer.computeDocMap(reader, dir);
+                reordered = true;
+                return docMap;
+              } catch (
+                  @SuppressWarnings("unused")
+                  NotEnoughRAMException e) {
+                // skip reordering, we don't have enough RAM anyway
+                return null;
+              }
+            }
+
+            @Override
+            public void setMergeInfo(SegmentCommitInfo info) {
+              info.info.addDiagnostics(
+                  Collections.singletonMap("bp.reordered", 
Boolean.toString(reordered)));

Review Comment:
   use the constant here `REORDERED`?



##########
lucene/core/src/java/org/apache/lucene/index/IndexWriter.java:
##########
@@ -5160,20 +5177,74 @@ public int length() {
         }
         mergeReaders.add(wrappedReader);
       }
+
+      MergeState.DocMap[] reorderDocMaps = null;
+      if (config.getIndexSort() == null) {
+        // Create a merged view of the input segments. This effectively does 
the merge.
+        CodecReader mergedView = 
SlowCompositeCodecReaderWrapper.wrap(mergeReaders);
+        Sorter.DocMap docMap = merge.reorder(mergedView, directory);
+        if (docMap != null) {
+          reorderDocMaps = new MergeState.DocMap[mergeReaders.size()];
+          int docBase = 0;
+          int i = 0;
+          for (CodecReader reader : mergeReaders) {
+            final int currentDocBase = docBase;
+            reorderDocMaps[i] =
+                new MergeState.DocMap() {
+                  @Override
+                  public int get(int docID) {
+                    Objects.checkIndex(docID, reader.maxDoc());
+                    return docMap.oldToNew(currentDocBase + docID);
+                  }
+                };
+            i++;
+            docBase += reader.maxDoc();
+          }
+          // This makes merging more expensive as it disables some bulk 
merging optimizations, so
+          // only do this if a non-null DocMap is returned.
+          mergeReaders =
+              Collections.singletonList(SortingCodecReader.wrap(mergedView, 
docMap, null));
+        }
+      }
+
       final SegmentMerger merger =
           new SegmentMerger(
               mergeReaders, merge.info.info, infoStream, dirWrapper, 
globalFieldNumberMap, context);
       merge.info.setSoftDelCount(Math.toIntExact(softDeleteCount.get()));
       merge.checkAborted();
 
+      MergeState mergeState = merger.mergeState;
+      MergeState.DocMap[] docMaps;
+      if (reorderDocMaps == null) {
+        docMaps = mergeState.docMaps;
+      } else {
+        // Since the reader was reordered, we passed a merged view to 
MergeState and from its
+        // perspective there is a single input segment to the merge and the
+        // SlowCompositeCodecReaderWrapper is effectively doing the merge.
+        assert mergeState.docMaps.length == 1;

Review Comment:
   Can you put the length in the message? It will be helpful if we run into an 
error here.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscribe@lucene.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscribe@lucene.apache.org
For additional commands, e-mail: issues-help@lucene.apache.org

Reply via email to