jpountz commented on code in PR #12573:
URL: https://github.com/apache/lucene/pull/12573#discussion_r1332932759


##########
lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java:
##########
@@ -197,6 +183,160 @@ boolean any() {
 
   @Override
   public long ramBytesUsed() {
-    return bytesUsed.get() + fieldUpdatesBytesUsed.get() + termsBytesUsed.get();
+    return bytesUsed.get() + fieldUpdatesBytesUsed.get() + deleteTerms.ramBytesUsed();
+  }
+
+  static class DeletedTerms implements Accountable {
+
+    private final Counter bytesUsed = Counter.newCounter();
+    private final ByteBlockPool pool =
        new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
+    private final Map<String, BytesRefIntMap> deleteTerms = new HashMap<>();
+    private int termsSize = 0;
+
+    DeletedTerms() {}
+
+    /**
+     * Get the newest doc id of the deleted term.
+     *
+     * @param term The deleted term.
+     * @return The newest doc id of this deleted term.
+     */
+    int get(Term term) {
+      BytesRefIntMap hash = deleteTerms.get(term.field);
+      if (hash == null) {
+        return -1;
+      }
+      return hash.get(term.bytes);
+    }
+
+    /**
+     * Put the newest doc id of the deleted term.
+     *
+     * @param term The deleted term.
+     * @param value The newest doc id of the deleted term.
+     */
+    void put(Term term, int value) {
+      BytesRefIntMap hash =
+          deleteTerms.computeIfAbsent(
+              term.field,
+              k -> {
+                bytesUsed.addAndGet(RamUsageEstimator.sizeOf(term.field));
+                return new BytesRefIntMap(pool, bytesUsed);
+              });
+      int v = hash.put(term.bytes, value);
+      if (v == -1) {
+        termsSize++;
+      }
+    }
+
+    void clear() {
+      bytesUsed.addAndGet(-bytesUsed.get());
+      deleteTerms.clear();
+      termsSize = 0;
+    }
+
+    int size() {
+      return termsSize;
+    }
+
+    boolean isEmpty() {
+      return termsSize == 0;
+    }
+
+    /** Only used in tests; not efficient. */
+    Set<Term> keySet() {
+      return deleteTerms.entrySet().stream()
+          .flatMap(
+              entry -> entry.getValue().keySet().stream().map(b -> new 
Term(entry.getKey(), b)))
+          .collect(Collectors.toSet());
+    }
+
+    interface DeletedTermConsumer<E extends Exception> {
+      void accept(Term term, int docId) throws E;
+    }
+
+    /**
+     * Consume all terms in a sorted order.
+     *
+     * <p>Note: This is a destructive operation as it calls {@link BytesRefHash#sort()}.
+     *
+     * @see BytesRefHash#sort
+     */
+    <E extends Exception> void forEachOrdered(DeletedTermConsumer<E> consumer) throws E {
+      List<Map.Entry<String, BytesRefIntMap>> deleteFields =
+          new ArrayList<>(deleteTerms.entrySet());
+      deleteFields.sort(Map.Entry.comparingByKey());
+      Term scratch = new Term("", new BytesRef());
+      for (Map.Entry<String, BufferedUpdates.BytesRefIntMap> deleteFieldEntry : deleteFields) {
+        scratch.field = deleteFieldEntry.getKey();
+        BufferedUpdates.BytesRefIntMap terms = deleteFieldEntry.getValue();
+        int[] indices = terms.bytesRefHash.sort();
+        for (int index : indices) {
+          if (index != -1) {
+            terms.bytesRefHash.get(index, scratch.bytes);
+            consumer.accept(scratch, terms.values[index]);
+          }
+        }
+      }
+    }
+
+    @Override
+    public long ramBytesUsed() {
+      return bytesUsed.get();
+    }
+  }
+
+  private static class BytesRefIntMap {
+
+    private final Counter counter;
+    private final BytesRefHash bytesRefHash;
+    private int[] values;
+
+    private BytesRefIntMap(ByteBlockPool pool, Counter counter) {
+      this.counter = counter;
+      this.bytesRefHash =
+          new BytesRefHash(
+              pool,
+              BytesRefHash.DEFAULT_CAPACITY,
+              new BytesRefHash.DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, counter));
+      this.values = new int[BytesRefHash.DEFAULT_CAPACITY];
+      counter.addAndGet(BytesRefHash.DEFAULT_CAPACITY * Integer.BYTES);

Review Comment:
   Should we count a `NUM_BYTES_ARRAY_HEADER` too?
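   
   For illustration, the constructor accounting might then look something like
   this (untested sketch; `NUM_BYTES_ARRAY_HEADER` is the constant from
   `RamUsageEstimator`):
   
   ```java
   this.values = new int[BytesRefHash.DEFAULT_CAPACITY];
   // account for the int slots plus the array object header itself
   counter.addAndGet(
       RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
           + (long) BytesRefHash.DEFAULT_CAPACITY * Integer.BYTES);
   ```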



##########
lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java:
##########
@@ -197,6 +183,160 @@ boolean any() {
 
   @Override
   public long ramBytesUsed() {
-    return bytesUsed.get() + fieldUpdatesBytesUsed.get() + termsBytesUsed.get();
+    return bytesUsed.get() + fieldUpdatesBytesUsed.get() + deleteTerms.ramBytesUsed();
+  }
+
+  static class DeletedTerms implements Accountable {
+
+    private final Counter bytesUsed = Counter.newCounter();
+    private final ByteBlockPool pool =
        new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
+    private final Map<String, BytesRefIntMap> deleteTerms = new HashMap<>();
+    private int termsSize = 0;
+
+    DeletedTerms() {}
+
+    /**
+     * Get the newest doc id of the deleted term.
+     *
+     * @param term The deleted term.
+     * @return The newest doc id of this deleted term.
+     */
+    int get(Term term) {
+      BytesRefIntMap hash = deleteTerms.get(term.field);
+      if (hash == null) {
+        return -1;
+      }
+      return hash.get(term.bytes);
+    }
+
+    /**
+     * Put the newest doc id of the deleted term.
+     *
+     * @param term The deleted term.
+     * @param value The newest doc id of the deleted term.
+     */
+    void put(Term term, int value) {
+      BytesRefIntMap hash =
+          deleteTerms.computeIfAbsent(
+              term.field,
+              k -> {
+                bytesUsed.addAndGet(RamUsageEstimator.sizeOf(term.field));
+                return new BytesRefIntMap(pool, bytesUsed);
+              });
+      int v = hash.put(term.bytes, value);
+      if (v == -1) {
+        termsSize++;
+      }
+    }
+
+    void clear() {
+      bytesUsed.addAndGet(-bytesUsed.get());
+      deleteTerms.clear();
+      termsSize = 0;
+    }
+
+    int size() {
+      return termsSize;
+    }
+
+    boolean isEmpty() {
+      return termsSize == 0;
+    }
+
+    /** Only used in tests; not efficient. */
+    Set<Term> keySet() {
+      return deleteTerms.entrySet().stream()
+          .flatMap(
+              entry -> entry.getValue().keySet().stream().map(b -> new Term(entry.getKey(), b)))
+          .collect(Collectors.toSet());
+    }
+
+    interface DeletedTermConsumer<E extends Exception> {
+      void accept(Term term, int docId) throws E;

Review Comment:
   do we actually need generics here, or could it just be `IOException`?
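   
   E.g. the non-generic alternative would look like this (sketch only, assuming
   all current callers can live with `IOException`):
   
   ```java
   interface DeletedTermConsumer {
     void accept(Term term, int docId) throws IOException;
   }
   
   // forEachOrdered would then lose its type parameter as well:
   void forEachOrdered(DeletedTermConsumer consumer) throws IOException {
     // ... same body as the generic version ...
   }
   ```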



##########
lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java:
##########
@@ -197,6 +183,160 @@ boolean any() {
 
   @Override
   public long ramBytesUsed() {
-    return bytesUsed.get() + fieldUpdatesBytesUsed.get() + termsBytesUsed.get();
+    return bytesUsed.get() + fieldUpdatesBytesUsed.get() + deleteTerms.ramBytesUsed();
+  }
+
+  static class DeletedTerms implements Accountable {
+
+    private final Counter bytesUsed = Counter.newCounter();
+    private final ByteBlockPool pool =
        new ByteBlockPool(new ByteBlockPool.DirectTrackingAllocator(bytesUsed));
+    private final Map<String, BytesRefIntMap> deleteTerms = new HashMap<>();
+    private int termsSize = 0;
+
+    DeletedTerms() {}
+
+    /**
+     * Get the newest doc id of the deleted term.
+     *
+     * @param term The deleted term.
+     * @return The newest doc id of this deleted term.
+     */
+    int get(Term term) {
+      BytesRefIntMap hash = deleteTerms.get(term.field);
+      if (hash == null) {
+        return -1;
+      }
+      return hash.get(term.bytes);
+    }
+
+    /**
+     * Put the newest doc id of the deleted term.
+     *
+     * @param term The deleted term.
+     * @param value The newest doc id of the deleted term.
+     */
+    void put(Term term, int value) {
+      BytesRefIntMap hash =
+          deleteTerms.computeIfAbsent(
+              term.field,
+              k -> {
+                bytesUsed.addAndGet(RamUsageEstimator.sizeOf(term.field));
+                return new BytesRefIntMap(pool, bytesUsed);
+              });
+      int v = hash.put(term.bytes, value);
+      if (v == -1) {
+        termsSize++;
+      }
+    }
+
+    void clear() {
+      bytesUsed.addAndGet(-bytesUsed.get());
+      deleteTerms.clear();
+      termsSize = 0;
+    }
+
+    int size() {
+      return termsSize;
+    }
+
+    boolean isEmpty() {
+      return termsSize == 0;
+    }
+
+    /** Only used in tests; not efficient. */
+    Set<Term> keySet() {
+      return deleteTerms.entrySet().stream()
+          .flatMap(
+              entry -> entry.getValue().keySet().stream().map(b -> new Term(entry.getKey(), b)))
+          .collect(Collectors.toSet());
+    }
+
+    interface DeletedTermConsumer<E extends Exception> {
+      void accept(Term term, int docId) throws E;
+    }
+
+    /**
+     * Consume all terms in a sorted order.
+     *
+     * <p>Note: This is a destructive operation as it calls {@link BytesRefHash#sort()}.
+     *
+     * @see BytesRefHash#sort
+     */
+    <E extends Exception> void forEachOrdered(DeletedTermConsumer<E> consumer) throws E {
+      List<Map.Entry<String, BytesRefIntMap>> deleteFields =
+          new ArrayList<>(deleteTerms.entrySet());
+      deleteFields.sort(Map.Entry.comparingByKey());
+      Term scratch = new Term("", new BytesRef());
+      for (Map.Entry<String, BufferedUpdates.BytesRefIntMap> deleteFieldEntry : deleteFields) {
+        scratch.field = deleteFieldEntry.getKey();
+        BufferedUpdates.BytesRefIntMap terms = deleteFieldEntry.getValue();
+        int[] indices = terms.bytesRefHash.sort();
+        for (int index : indices) {
+          if (index != -1) {
+            terms.bytesRefHash.get(index, scratch.bytes);
+            consumer.accept(scratch, terms.values[index]);
+          }
+        }
+      }
+    }
+
+    @Override
+    public long ramBytesUsed() {
+      return bytesUsed.get();
+    }
+  }
+
+  private static class BytesRefIntMap {
+
+    private final Counter counter;
+    private final BytesRefHash bytesRefHash;
+    private int[] values;
+
+    private BytesRefIntMap(ByteBlockPool pool, Counter counter) {
+      this.counter = counter;
+      this.bytesRefHash =
+          new BytesRefHash(
+              pool,
+              BytesRefHash.DEFAULT_CAPACITY,
+              new BytesRefHash.DirectBytesStartArray(BytesRefHash.DEFAULT_CAPACITY, counter));
+      this.values = new int[BytesRefHash.DEFAULT_CAPACITY];
+      counter.addAndGet(BytesRefHash.DEFAULT_CAPACITY * Integer.BYTES);
+    }
+
+    private Set<BytesRef> keySet() {
+      BytesRef scratch = new BytesRef();
+      Set<BytesRef> set = new HashSet<>();
+      for (int i = 0; i < bytesRefHash.size(); i++) {
+        bytesRefHash.get(i, scratch);
+        set.add(BytesRef.deepCopyOf(scratch));
+      }
+      return set;
+    }
+
+    private int put(BytesRef key, int value) {
+      assert value >= 0;
+      int e = bytesRefHash.add(key);
+      if (e < 0) {
+        values[-e - 1] = value;
+        return value;

Review Comment:
   Usually, put returns the previous value that was held, not the new one. Can we switch to returning the previous value, or change the return value to a boolean that says whether the value was added or replaced?
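   
   Rough sketch of the previous-value variant (untested; the growth handling is
   assumed, since the rest of the method isn't shown in this excerpt):
   
   ```java
   private int put(BytesRef key, int value) {
     assert value >= 0;
     int e = bytesRefHash.add(key);
     if (e < 0) {
       // BytesRefHash#add returns -(id + 1) when the key already exists
       int id = -e - 1;
       int previous = values[id];
       values[id] = value;
       return previous; // previous doc id, so callers can detect replacement
     }
     if (e >= values.length) {
       // grow the parallel values array and account for the extra bytes
       int newLength = ArrayUtil.oversize(e + 1, Integer.BYTES);
       counter.addAndGet((long) (newLength - values.length) * Integer.BYTES);
       values = ArrayUtil.growExact(values, newLength);
     }
     values[e] = value;
     return -1; // key was absent
   }
   ```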



##########
lucene/core/src/java/org/apache/lucene/index/BufferedUpdates.java:
##########
@@ -139,15 +131,11 @@ public void addTerm(Term term, int docIDUpto) {
       return;
     }
 
-    deleteTerms.put(term, Integer.valueOf(docIDUpto));
-    // note that if current != null then it means there's already a buffered
+    deleteTerms.put(term, docIDUpto);
+    // note that if current != -1 then it means there's already a buffered
     // delete on that term, therefore we seem to over-count. this over-counting
     // is done to respect IndexWriterConfig.setMaxBufferedDeleteTerms.

Review Comment:
   Unrelated to your change, but there no longer seems to be an `IndexWriterConfig.setMaxBufferedDeleteTerms` (let's address it separately).


