salvatorecampagna commented on code in PR #15378:
URL: https://github.com/apache/lucene/pull/15378#discussion_r2482378365


##########
lucene/core/src/test/org/apache/lucene/index/TestIndexWriterMerging.java:
##########
@@ -333,6 +337,330 @@ public synchronized void merge(MergeSource mergeSource, MergeTrigger trigger)
     public void close() {}
   }
 
+  public void testForceMergeDeletesWithObserver() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter indexer =
+        new IndexWriter(
+            dir,
+            newIndexWriterConfig(new MockAnalyzer(random()))
+                .setMaxBufferedDocs(2)
+                .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH));
+
+    for (int i = 0; i < 10; i++) {
+      Document doc = new Document();
+      Field idField = newStringField("id", "" + i, Field.Store.NO);
+      doc.add(idField);
+      indexer.addDocument(doc);
+    }
+    indexer.close();
+
+    IndexReader beforeDeleteReader = DirectoryReader.open(dir);
+    assertEquals(10, beforeDeleteReader.maxDoc());
+    assertEquals(10, beforeDeleteReader.numDocs());
+    beforeDeleteReader.close();
+
+    IndexWriter deleter =
+        new IndexWriter(
+            dir,
+            newIndexWriterConfig(new MockAnalyzer(random()))
+                .setMergePolicy(NoMergePolicy.INSTANCE));
+    for (int i = 0; i < 10; i++) {
+      if (i % 2 == 0) {
+        deleter.deleteDocuments(new Term("id", "" + i));
+      }
+    }
+    deleter.close();
+
+    IndexReader afterDeleteReader = DirectoryReader.open(dir);
+    assertEquals(10, afterDeleteReader.maxDoc());
+    assertEquals(5, afterDeleteReader.numDocs());
+    afterDeleteReader.close();
+
+    IndexWriter iw =
+        new IndexWriter(
+            dir,
+            newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
+    assertEquals(10, iw.getDocStats().maxDoc);
+    assertEquals(5, iw.getDocStats().numDocs);
+    MergePolicy.MergeObserver observer = iw.forceMergeDeletes(false);
+
+    assertTrue(observer.hasNewMerges());
+    assertTrue(observer.numMerges() > 0);
+
+    // Measure time to detect stuck merges
+    long startNanos = System.nanoTime();
+    observer.await();
+    long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
+
+    assertTrue(
+        "Merge took too long: " + elapsedMillis + "ms (expected < 30000ms)",
+        elapsedMillis < 30_000);
+
+    assertEquals(5, iw.getDocStats().maxDoc);
+    assertEquals(5, iw.getDocStats().numDocs);
+
+    iw.waitForMerges();
+    iw.close();
+    dir.close();
+  }
+
+  public void testMergeObserverGetMerges() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter indexer =
+        new IndexWriter(
+            dir,
+            newIndexWriterConfig(new MockAnalyzer(random()))
+                .setMergePolicy(NoMergePolicy.INSTANCE));
+
+    for (int i = 0; i < 10; i++) {
+      Document doc = new Document();
+      Field idField = newStringField("id", "" + i, Field.Store.NO);
+      doc.add(idField);
+      indexer.addDocument(doc);
+    }
+    indexer.close();
+
+    IndexWriter deleter =
+        new IndexWriter(
+            dir,
+            newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
+    for (int i = 0; i < 2; i++) {
+      deleter.deleteDocuments(new Term("id", "" + i));
+    }
+
+    MergePolicy.MergeObserver observer = deleter.forceMergeDeletes(false);
+    assertTrue(observer.hasNewMerges());
+    assertTrue(observer.numMerges() > 0);
+
+    int numMerges = observer.numMerges();
+    for (int j = 0; j < numMerges; j++) {
+      MergePolicy.OneMerge oneMerge = observer.getMerge(j);
+      assertNotNull(oneMerge);
+    }
+
+    try {
+      observer.getMerge(-1);
+      fail("Should throw IndexOutOfBoundsException");
+    } catch (IndexOutOfBoundsException expected) {
+      String message = expected.getMessage();
+      assertTrue(
+          "Message should mention 'out of bounds', got: " + message,
+          message.contains("Index -1 out of bounds for length 1"));
+    }
+
+    try {
+      observer.getMerge(numMerges);
+      fail("Should throw IndexOutOfBoundsException");
+    } catch (IndexOutOfBoundsException expected) {
+      String message = expected.getMessage();
+      assertTrue(
+          "Message should mention 'out of bounds', got: " + message,
+          message.contains("Index " + numMerges + " out of bounds for length 1"));
+    }
+
+    deleter.waitForMerges();
+    deleter.close();
+    dir.close();
+  }
+
+  public void testMergeObserverNoMerges() throws IOException {
+    Directory dir = newDirectory();
+    IndexWriter writer =
+        new IndexWriter(
+            dir,
+            newIndexWriterConfig(new MockAnalyzer(random()))
+                .setMergePolicy(NoMergePolicy.INSTANCE));
+
+    Document doc = new Document();
+    doc.add(newStringField("id", "1", Field.Store.NO));
+    writer.addDocument(doc);
+    writer.commit();
+
+    MergePolicy.MergeObserver observer = writer.forceMergeDeletes(false);
+
+    assertFalse("Should have no merges when no deletions", observer.hasNewMerges());
+    assertEquals("Should have zero merges", 0, observer.numMerges());
+
+    try {
+      observer.getMerge(0);
+      fail("Should throw IndexOutOfBoundsException when no merges");
+    } catch (IndexOutOfBoundsException expected) {
+      String message = expected.getMessage();
+      assertTrue(
+          "Message should mention 'no merges', got: " + message,
+          message.contains("No merges available"));
+    }
+
+    writer.close();
+    dir.close();
+  }
+
+  public void testMergeObserverAwaitWithTimeout() throws Exception {
+    Directory dir = newDirectory();
+    IndexWriter iw =
+        new IndexWriter(
+            dir,
+            newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
+
+    for (int i = 0; i < 10; i++) {
+      Document doc = new Document();
+      doc.add(newStringField("id", "" + i, Field.Store.NO));
+      iw.addDocument(doc);
+    }
+    iw.commit();
+
+    iw.deleteDocuments(new Term("id", "0"));
+    iw.deleteDocuments(new Term("id", "1"));
+    iw.deleteDocuments(new Term("id", "2"));
+    iw.commit();
+
+    MergePolicy.MergeObserver observer = iw.forceMergeDeletes(false);
+
+    // Measure time to detect stuck merges
+    long startNanos = System.nanoTime();
+    assertTrue("await should complete before timeout", observer.await(10, TimeUnit.MINUTES));
+    long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
+
+    assertTrue(
+        "Merge took too long: " + elapsedMillis + "ms (expected < 30000ms)",
+        elapsedMillis < 30_000);

Review Comment:
   Correct. Calling `await()` without a timeout and then checking the elapsed time separately doesn't make sense. It's much cleaner to put the timeout directly in `await()`:
   ```
   assertTrue(observer.await(30_000, MILLISECONDS));
   ```
   
   This mimics real usage, where users will call `await()` with a reasonable timeout. The 30,000 ms is just a test value ensuring `await()` doesn't hang indefinitely. Since we're exposing the `await()` APIs, testing that they actually work seems reasonable to me.


